code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def build(optimizer_config, net, name=None, mixed=False, loss_scale=512.0):
"""Create optimizer based on config.
Args:
optimizer_config: An Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
optimizer_func = partial(
torch.optim.RMSprop,
alpha=config.decay,
momentum=config.momentum_optimizer_value,
eps=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
optimizer_func = partial(
torch.optim.SGD,
# torch.optim.SGD has no `eps` argument; passing config.epsilon here would raise a TypeError
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
if optimizer_config.fixed_weight_decay:
optimizer_func = partial(
torch.optim.Adam, betas=(0.9, 0.99), amsgrad=config.amsgrad)
else:
# regular adam
optimizer_func = partial(
torch.optim.Adam, amsgrad=config.amsgrad)
# optimizer = OptimWrapper(optimizer, true_wd=optimizer_config.fixed_weight_decay, wd=config.weight_decay)
optimizer = OptimWrapper.create(
optimizer_func,
3e-3,
get_layer_groups(net),
wd=config.weight_decay,
true_wd=optimizer_config.fixed_weight_decay,
bn_wd=True)
print(hasattr(optimizer, "_amp_stash"), '_amp_stash')
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
raise ValueError('torch don\'t support moving average')
if name is None:
# assign a name to optimizer for checkpoint system
optimizer.name = optimizer_type
else:
optimizer.name = name
return optimizer
|
Create optimizer based on config.
Args:
optimizer_config: An Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
|
build
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/optimizer_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/optimizer_builder.py
|
MIT
|
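The `build` function above never instantiates the optimizer directly: it binds the proto's hyper-parameters into a `functools.partial` around a `torch.optim` class and leaves the parameter groups and learning rate to `OptimWrapper.create`. A minimal standalone sketch of that deferred-construction pattern, using plain `torch.optim` and a made-up `SimpleNamespace` in place of the proto config:

```python
from functools import partial
from types import SimpleNamespace

import torch
import torch.nn as nn

# Stand-in for the rms_prop_optimizer proto message (illustrative values only).
config = SimpleNamespace(decay=0.9, momentum_optimizer_value=0.0, epsilon=1e-3)

# Bind the hyper-parameters now, but leave `params` and `lr` to the caller,
# just as build() leaves them to OptimWrapper.create().
optimizer_func = partial(
    torch.optim.RMSprop,
    alpha=config.decay,
    momentum=config.momentum_optimizer_value,
    eps=config.epsilon)

net = nn.Linear(4, 2)
optimizer = optimizer_func(net.parameters(), lr=3e-3)
print(type(optimizer).__name__)  # RMSprop
```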
def second_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
"""box encode for VoxelNet
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, l, w, h, r
anchors ([N, 7] Tensor): anchors
"""
box_ndim = anchors.shape[-1]
cas, cgs = [], []
if box_ndim > 7:
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
else:
xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg = torch.split(boxes, 1, dim=-1)
diagonal = torch.sqrt(la**2 + wa**2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
cts = [g - a for g, a in zip(cgs, cas)]
if smooth_dim:
lt = lg / la - 1
wt = wg / wa - 1
ht = hg / ha - 1
else:
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
if encode_angle_to_vector:
rgx = torch.cos(rg)
rgy = torch.sin(rg)
rax = torch.cos(ra)
ray = torch.sin(ra)
rtx = rgx - rax
rty = rgy - ray
return torch.cat([xt, yt, zt, wt, lt, ht, rtx, rty, *cts], dim=-1)
else:
rt = rg - ra
return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)
|
box encode for VoxelNet
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, l, w, h, r
anchors ([N, 7] Tensor): anchors
|
second_box_encode
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
def second_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
"""box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
"""
box_ndim = anchors.shape[-1]
cas, cts = [], []
if box_ndim > 7:
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
if encode_angle_to_vector:
xt, yt, zt, wt, lt, ht, rtx, rty, *cts = torch.split(
box_encodings, 1, dim=-1)
else:
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
else:
xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
if encode_angle_to_vector:
xt, yt, zt, wt, lt, ht, rtx, rty = torch.split(
box_encodings, 1, dim=-1)
else:
xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
# za = za + ha / 2
# xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(la**2 + wa**2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
if smooth_dim:
lg = (lt + 1) * la
wg = (wt + 1) * wa
hg = (ht + 1) * ha
else:
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
if encode_angle_to_vector:
rax = torch.cos(ra)
ray = torch.sin(ra)
rgx = rtx + rax
rgy = rty + ray
rg = torch.atan2(rgy, rgx)
else:
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)
|
box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
|
second_box_decode
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
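`second_box_encode` and `second_box_decode` above are exact inverses: centers are normalised by the anchor's BEV diagonal (height for z), sizes become log ratios, and yaw is a plain difference. A self-contained round-trip check that re-derives the same formulas with only PyTorch, assuming the `[x, y, z, w, l, h, r]` layout implied by the split order (the code splits w before l even though the docstring lists l, w, h):

```python
import torch

def encode(boxes, anchors):
    # Same residual formulas as second_box_encode (7-dof case, no angle vector).
    xg, yg, zg, wg, lg, hg, rg = torch.split(boxes, 1, dim=-1)
    xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
    diag = torch.sqrt(la**2 + wa**2)
    return torch.cat([(xg - xa) / diag, (yg - ya) / diag, (zg - za) / ha,
                      torch.log(wg / wa), torch.log(lg / la), torch.log(hg / ha),
                      rg - ra], dim=-1)

def decode(encodings, anchors):
    # Inverse of encode(), mirroring second_box_decode.
    xt, yt, zt, wt, lt, ht, rt = torch.split(encodings, 1, dim=-1)
    xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
    diag = torch.sqrt(la**2 + wa**2)
    return torch.cat([xt * diag + xa, yt * diag + ya, zt * ha + za,
                      torch.exp(wt) * wa, torch.exp(lt) * la, torch.exp(ht) * ha,
                      rt + ra], dim=-1)

anchors = torch.tensor([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
boxes = torch.tensor([[0.5, -0.3, -0.9, 1.7, 4.1, 1.5, 0.2]])
assert torch.allclose(decode(encode(boxes, anchors), anchors), boxes, atol=1e-6)
```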
def bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
"""box encode for VoxelNet
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, l, w, h, r
anchors ([N, 7] Tensor): anchors
"""
xa, ya, wa, la, ra = torch.split(anchors, 1, dim=-1)
xg, yg, wg, lg, rg = torch.split(boxes, 1, dim=-1)
diagonal = torch.sqrt(la**2 + wa**2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
if smooth_dim:
lt = lg / la - 1
wt = wg / wa - 1
else:
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
if encode_angle_to_vector:
rgx = torch.cos(rg)
rgy = torch.sin(rg)
rax = torch.cos(ra)
ray = torch.sin(ra)
rtx = rgx - rax
rty = rgy - ray
return torch.cat([xt, yt, wt, lt, rtx, rty], dim=-1)
else:
rt = rg - ra
return torch.cat([xt, yt, wt, lt, rt], dim=-1)
# rt = rg - ra
# return torch.cat([xt, yt, zt, wt, lt, ht, rt], dim=-1)
|
box encode for VoxelNet
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, l, w, h, r
anchors ([N, 7] Tensor): anchors
|
bev_box_encode
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
"""box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
"""
xa, ya, wa, la, ra = torch.split(anchors, 1, dim=-1)
if encode_angle_to_vector:
xt, yt, wt, lt, rtx, rty = torch.split(
box_encodings, 1, dim=-1)
else:
xt, yt, wt, lt, rt = torch.split(box_encodings, 1, dim=-1)
# xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(la**2 + wa**2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
if smooth_dim:
lg = (lt + 1) * la
wg = (wt + 1) * wa
else:
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
if encode_angle_to_vector:
rax = torch.cos(ra)
ray = torch.sin(ra)
rgx = rtx + rax
rgy = rty + ray
rg = torch.atan2(rgy, rgx)
else:
rg = rt + ra
return torch.cat([xg, yg, wg, lg, rg], dim=-1)
|
box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
|
bev_box_decode
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relative to the smallest point.
dtype (output dtype, optional): Defaults to np.float32
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
dtype = torch_to_np_dtype(dims.dtype)
if isinstance(origin, float):
origin = [origin] * ndim
corners_norm = np.stack(
np.unravel_index(np.arange(2**ndim), [2] * ndim), axis=1).astype(dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
# so need to convert to a format which is convenient to do other computing.
# for 2d boxes, format is clockwise start from minimum point
# for 3d boxes, please draw them by your hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dtype)
corners_norm = torch.from_numpy(corners_norm).type_as(dims)
corners = dims.view(-1, 1, ndim) * corners_norm.view(1, 2**ndim, ndim)
return corners
|
generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relative to the smallest point.
dtype (output dtype, optional): Defaults to np.float32
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
|
corners_nd
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
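In `corners_nd` above, `np.unravel_index(np.arange(2**ndim), [2] * ndim)` enumerates every corner of the unit box as a binary pattern, and the explicit re-index `[0, 1, 3, 2]` turns that lexicographic order into a ring around the box. A NumPy-only illustration of the two steps in 2D:

```python
import numpy as np

ndim = 2
# Binary enumeration: rows are (x, y) in the order x0y0, x0y1, x1y0, x1y1.
corners_norm = np.stack(
    np.unravel_index(np.arange(2**ndim), [2] * ndim), axis=1).astype(np.float32)
print(corners_norm)
# [[0. 0.]
#  [0. 1.]
#  [1. 0.]
#  [1. 1.]]

# Reorder so the corners trace the box boundary instead of zig-zagging,
# then subtract the origin (0.5 centres the unit box on the origin).
ring = corners_norm[[0, 1, 3, 2]] - 0.5
print(ring)
# [[-0.5 -0.5]
#  [-0.5  0.5]
#  [ 0.5  0.5]
#  [ 0.5 -0.5]]
```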
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = torch.sin(angles)
rot_cos = torch.cos(angles)
rot_mat_T = torch.stack(
[tstack([rot_cos, -rot_sin]),
tstack([rot_sin, rot_cos])])
return torch.einsum('aij,jka->aik', (points, rot_mat_T))
|
rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
|
rotation_2d
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
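The `torch.einsum('aij,jka->aik', ...)` in `rotation_2d` applies a per-box 2x2 rotation to all corners at once; `rot_mat_T` is stacked so its last axis indexes the box. A short check, assuming `tstack` is simply `torch.stack`, that the einsum matches an explicit per-box matrix multiply:

```python
import torch

def rotation_2d(points, angles):
    # points: [N, point_size, 2], angles: [N]
    rot_sin, rot_cos = torch.sin(angles), torch.cos(angles)
    rot_mat_T = torch.stack(
        [torch.stack([rot_cos, -rot_sin]),
         torch.stack([rot_sin, rot_cos])])          # shape [2, 2, N]
    return torch.einsum('aij,jka->aik', points, rot_mat_T)

points = torch.randn(3, 4, 2)
angles = torch.tensor([0.0, 0.5, -1.2])
out = rotation_2d(points, angles)

# Reference: rotate each box's corners with its own explicit 2x2 matrix.
ref = []
for p, a in zip(points, angles):
    c, s = torch.cos(a), torch.sin(a)
    ref.append(p @ torch.stack([torch.stack([c, -s]), torch.stack([s, c])]))
assert torch.allclose(out, torch.stack(ref), atol=1e-5)
```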
def center_to_corner_box3d(centers,
dims,
angles,
origin=(0.5, 0.5, 0.5),
axis=1):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relative to the smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 8, 3]
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.view(-1, 1, 3)
return corners
|
convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relative to the smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
|
center_to_corner_box3d
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 4, 2]
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.view(-1, 1, 2)
return corners
|
convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
|
center_to_corner_box2d
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/box_torch_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/box_torch_ops.py
|
MIT
|
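`center_to_corner_box2d` is just the composition corners_nd -> rotation_2d -> translate. A hand-worked NumPy example for a single BEV box (the numbers are illustrative only), using the same clockwise rotation convention as `rotation_2d` above:

```python
import numpy as np

# One BEV box: centre (2, 3), dims (4, 2), yaw = 90 degrees.
center = np.array([2.0, 3.0])
dims = np.array([4.0, 2.0])
angle = np.pi / 2

# corners_nd with origin=0.5: unit-box corner ring around (0, 0), scaled by dims.
corners = np.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]]) * dims

# rotation_2d convention: positive angle rotates clockwise via corners @ [[c, -s], [s, c]].
c, s = np.cos(angle), np.sin(angle)
corners = corners @ np.array([[c, -s], [s, c]])

# center_to_corner_box2d finally shifts the corners by the box centre.
corners += center
print(np.round(corners, 3))   # a 2 x 4 rectangle now centred on (2, 3)
```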
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
""" Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
"""
input = prediction_tensor
target = target_tensor
batch_size = prediction_tensor.shape[0]
num_anchors = prediction_tensor.shape[1]
num_class = prediction_tensor.shape[2]
edges = self.edges
weights_ghm = torch.zeros_like(input).view(-1, num_class)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
# gradient length
g = torch.abs(input.sigmoid().detach() - target).view(-1, num_class)
valid = weights.view(-1, 1).expand(-1, num_class) > 0
num_examples = max(valid.float().sum().item(), 1.0)
num_valid_bins = 0 # n valid bins
self.count -= 1
num_bins = []
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i+1]) & valid
num_in_bin = inds.sum().item()
num_bins.append(num_in_bin)
if num_in_bin > 0:
if self.momentum > 0:
self.acc_sum[i] = self.momentum * self.acc_sum[i] \
+ (1 - self.momentum) * num_in_bin
weights_ghm[inds] = num_examples / self.acc_sum[i]
else:
weights_ghm[inds] = num_examples / num_in_bin
num_valid_bins += 1
if self.count <= 0:
print("GHMC loss bins:", num_bins)
self.count = 50
if num_valid_bins > 0:
weights_ghm = weights_ghm / num_valid_bins
return per_entry_cross_ent * weights_ghm.view(batch_size, num_anchors, num_class) / num_examples
|
Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/ghm_loss.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/ghm_loss.py
|
MIT
|
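The GHM classification loss above re-weights each entry by the inverse density of its gradient length `g = |sigmoid(x) - target|`, so entries in crowded bins (mostly very easy ones) are down-weighted. A dependency-free toy run of just the binning step, with illustrative bin edges and the momentum-free branch only:

```python
import torch

logits = torch.tensor([-4.0, -2.0, 0.0, 1.0, 3.0, 5.0])
targets = torch.tensor([0.0, 0.0, 1.0, 1.0, 1.0, 1.0])

bins = 5
edges = torch.linspace(0, 1, bins + 1)
edges[-1] += 1e-6                              # keep g == 1 inside the last bin

g = (torch.sigmoid(logits) - targets).abs()    # gradient length per entry
weights = torch.zeros_like(g)
num_examples = g.numel()

num_valid_bins = 0
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1])
    n = int(inds.sum())
    if n > 0:
        weights[inds] = num_examples / n       # inverse gradient density
        num_valid_bins += 1
weights /= num_valid_bins

print(g)
print(weights)   # the crowded easy bin gets a small weight, the hard entries a large one
```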
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights):
""" Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
"""
# ASL1 loss
diff = prediction_tensor - target_tensor
loss = torch.sqrt(diff * diff + self.mu * self.mu) - self.mu
batch_size = prediction_tensor.shape[0]
num_anchors = prediction_tensor.shape[1]
num_codes = prediction_tensor.shape[2]
# gradient length
g = torch.abs(diff / torch.sqrt(self.mu * self.mu + diff * diff)).detach().view(-1, num_codes)
weights_ghm = torch.zeros_like(g)
valid = weights.view(-1, 1).expand(-1, num_codes) > 0
# print(g.shape, prediction_tensor.shape, valid.shape)
num_examples = max(valid.float().sum().item() / num_codes, 1.0)
num_valid_bins = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= self.edges[i]) & (g < self.edges[i+1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
num_valid_bins += 1
if self.momentum > 0:
self.acc_sum[i] = self.momentum * self.acc_sum[i] \
+ (1 - self.momentum) * num_in_bin
weights_ghm[inds] = num_examples / self.acc_sum[i]
else:
weights_ghm[inds] = num_examples / num_in_bin
if num_valid_bins > 0:
weights_ghm /= num_valid_bins
weights_ghm = weights_ghm.view(batch_size, num_anchors, num_codes)
loss = loss * weights_ghm / num_examples
return loss
|
Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/ghm_loss.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/ghm_loss.py
|
MIT
|
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=np.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
dense = torch.zeros(size).fill_(default_value)
dense[indices] = indices_value
return dense
|
Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
|
indices_to_dense_vector
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
representing predicted quantities.
target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
if ignore_nan_targets:
target_tensor = torch.where(torch.isnan(target_tensor),
prediction_tensor,
target_tensor)
return self._compute_loss(prediction_tensor, target_tensor, **params)
|
Call the loss function.
Args:
prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
representing predicted quantities.
target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.
|
__call__
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
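The `ignore_nan_targets` branch above replaces NaN targets with the corresponding predictions, so those entries produce a zero residual and drop out of any elementwise regression loss. A quick demonstration of the `torch.where` pattern:

```python
import torch

prediction = torch.tensor([[0.2, -0.5, 1.0]])
target = torch.tensor([[0.0, float('nan'), 2.0]])

# NaN targets are replaced by the prediction itself ...
safe_target = torch.where(torch.isnan(target), prediction, target)

# ... so those entries vanish from an elementwise loss.
loss = (prediction - safe_target) ** 2
print(safe_target)   # tensor([[ 0.0000, -0.5000,  2.0000]])
print(loss)          # tensor([[0.0400, 0.0000, 1.0000]])
```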
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
self._code_weights = self._code_weights.type_as(prediction_tensor).to(prediction_tensor.device)
self._code_weights = self._code_weights.view(1, 1, -1)
diff = self._code_weights * diff
weighted_diff = diff * weights.unsqueeze(-1)
square_diff = 0.5 * weighted_diff * weighted_diff
return square_diff.sum(2)
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
code_weights = self._code_weights.type_as(prediction_tensor).to(target_tensor.device)
diff = code_weights.view(1, 1, -1) * diff
abs_diff = torch.abs(diff)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
+ (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
if self._codewise:
anchorwise_smooth_l1norm = loss
if weights is not None:
anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
else:
anchorwise_smooth_l1norm = torch.sum(loss, 2)# * weights
if weights is not None:
anchorwise_smooth_l1norm *= weights
return anchorwise_smooth_l1norm
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
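The weighted smooth-L1 above uses the SSD/Faster R-CNN sigma parameterisation: quadratic for `|d| <= 1/sigma^2`, linear beyond. That matches PyTorch's built-in `smooth_l1_loss` with `beta = 1/sigma^2`, which the following sanity check confirms (per-code and per-anchor weights omitted):

```python
import torch
import torch.nn.functional as F

sigma = 3.0
beta = 1.0 / sigma**2

diff = torch.tensor([-2.0, -0.05, 0.0, 0.08, 0.5])
pred, target = diff, torch.zeros_like(diff)

# The piecewise formula used in the loss above.
abs_diff = diff.abs()
lt = (abs_diff <= beta).float()
manual = lt * 0.5 * (abs_diff * sigma) ** 2 + (1.0 - lt) * (abs_diff - 0.5 * beta)

# PyTorch's built-in smooth L1 with beta = 1 / sigma^2 gives the same values.
builtin = F.smooth_l1_loss(pred, target, beta=beta, reduction='none')
assert torch.allclose(manual, builtin, atol=1e-6)
print(manual)
```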
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(-1)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
all_zero_negative: bool. if True, will treat all zero as background.
else, will treat first label as background. only affect alpha.
"""
self._alpha = alpha
self._gamma = gamma
|
Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
all_zero_negative: bool. if True, will treat all zero as background.
else, will treat first label as background. only affect alpha.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = (target_tensor * self._alpha +
(1 - target_tensor) * (1 - self._alpha))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
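The sigmoid focal loss above scales the per-entry BCE by `(1 - p_t)^gamma`, which suppresses well-classified entries, and by an alpha split between positives and negatives. A compact standalone version using torch's built-in BCE-with-logits as the base term (a sketch, not the repo's `_sigmoid_cross_entropy_with_logits`):

```python
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # Per-entry cross entropy plays the role of _sigmoid_cross_entropy_with_logits.
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p = torch.sigmoid(logits)
    p_t = targets * p + (1 - targets) * (1 - p)      # probability of the true class
    modulating = (1.0 - p_t) ** gamma                # down-weight easy entries
    alpha_w = targets * alpha + (1 - targets) * (1 - alpha)
    return modulating * alpha_w * ce

logits = torch.tensor([[-3.0, 0.0, 4.0]])
targets = torch.tensor([[0.0, 1.0, 1.0]])
print(sigmoid_focal_loss(logits, targets))
# the confident, correct entries (first and last) are strongly suppressed
```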
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
self._alpha = alpha
self._gamma = gamma
|
Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
# convert [N, num_anchors] to [N, num_anchors, num_classes]
per_entry_cross_ent = per_entry_cross_ent.unsqueeze(-1) * target_tensor
prediction_probabilities = F.softmax(prediction_tensor, dim=-1)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = torch.where(target_tensor[..., 0] == 1,
torch.tensor(1 - self._alpha).type_as(per_entry_cross_ent),
torch.tensor(self._alpha).type_as(per_entry_cross_ent))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
"""
num_classes = prediction_tensor.shape[-1]
prediction_tensor = torch.div(
prediction_tensor, self._logit_scale)
per_row_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor.view(-1, num_classes),
logits=prediction_tensor.view(-1, num_classes)))
return per_row_cross_ent.view(weights.shape) * weights
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def __init__(self, alpha, bootstrap_type='soft'):
"""Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either 'hard' or 'soft' (default)
Raises:
ValueError: if bootstrap_type is not either 'hard' or 'soft'
"""
if bootstrap_type != 'hard' and bootstrap_type != 'soft':
raise ValueError('Unrecognized bootstrap_type: must be one of '
'\'hard\' or \'soft.\'')
self._alpha = alpha
self._bootstrap_type = bootstrap_type
|
Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either 'hard' or 'soft' (default)
Raises:
ValueError: if bootstrap_type is not either 'hard' or 'soft'
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if self._bootstrap_type == 'soft':
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * torch.sigmoid(prediction_tensor)
else:
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float()
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=bootstrap_target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights.unsqueeze(2)
|
Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
|
_compute_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/core/losses.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/core/losses.py
|
MIT
|
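The bootstrapped sigmoid loss above blends the provided labels with the model's own predictions before computing cross entropy: 'soft' mixes in sigmoid probabilities, 'hard' mixes in thresholded 0/1 predictions. A short sketch of just that target-mixing step:

```python
import torch

alpha = 0.95
logits = torch.tensor([[2.0, -1.0, 0.5]])
targets = torch.tensor([[1.0, 1.0, 0.0]])    # the middle label may be noisy

probs = torch.sigmoid(logits)
soft_target = alpha * targets + (1 - alpha) * probs
hard_target = alpha * targets + (1 - alpha) * (probs > 0.5).float()

print(soft_target)   # labels nudged toward the model's current belief
print(hard_target)
```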
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
"""
Pillar Feature Net Layer.
The Pillar Feature Net could be composed of a series of these layers, but the PointPillars paper results only
used a single PFNLayer. This layer performs a similar role as second.pytorch.voxelnet.VFELayer.
:param in_channels: <int>. Number of input channels.
:param out_channels: <int>. Number of output channels.
:param use_norm: <bool>. Whether to include BatchNorm.
:param last_layer: <bool>. If last_layer, there is no concatenation of features.
"""
super().__init__()
self.name = 'PFNLayer'
self.last_vfe = last_layer
if not self.last_vfe:
out_channels = out_channels // 2
self.units = out_channels
if use_norm:
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Linear = change_default_args(bias=False)(nn.Linear)
else:
BatchNorm1d = Empty
Linear = change_default_args(bias=True)(nn.Linear)
self.linear = Linear(in_channels, self.units)
self.norm = BatchNorm1d(self.units)
|
Pillar Feature Net Layer.
The Pillar Feature Net could be composed of a series of these layers, but the PointPillars paper results only
used a single PFNLayer. This layer performs a similar role as second.pytorch.voxelnet.VFELayer.
:param in_channels: <int>. Number of input channels.
:param out_channels: <int>. Number of output channels.
:param use_norm: <bool>. Whether to include BatchNorm.
:param last_layer: <bool>. If last_layer, there is no concatenation of features.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
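`change_default_args`, used in `PFNLayer.__init__` above and throughout the repo, returns the given layer class with some keyword defaults pre-bound (e.g. `nn.Linear` without bias when a norm layer follows). A rough stand-in built on `functools.partial` to show the intent; the repo's actual torchplus helper is implemented differently:

```python
from functools import partial

import torch.nn as nn

def change_default_args(**new_defaults):
    """Return a decorator that pre-binds keyword defaults of a layer class (sketch)."""
    def wrapper(layer_cls):
        return partial(layer_cls, **new_defaults)
    return wrapper

# Same spelling as in PFNLayer.__init__:
BatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Linear = change_default_args(bias=False)(nn.Linear)

linear = Linear(9, 64)      # behaves like nn.Linear(9, 64, bias=False)
norm = BatchNorm1d(64)      # behaves like nn.BatchNorm1d(64, eps=1e-3, momentum=0.01)
print(linear.bias is None, norm.eps, norm.momentum)
```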
def __init__(self,
num_input_features=4,
use_norm=True,
num_filters=(64, ),
with_distance=False,
voxel_size=(0.2, 0.2, 4),
pc_range=(0, -40, -3, 70.4, 40, 1)):
"""
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
"""
super().__init__()
self.name = 'PillarFeatureNetOld'
assert len(num_filters) > 0
num_input_features += 5
if with_distance:
num_input_features += 1
self._with_distance = with_distance
# Create PillarFeatureNetOld layers
num_filters = [num_input_features] + list(num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
if i < len(num_filters) - 2:
last_layer = False
else:
last_layer = True
pfn_layers.append(
PFNLayer(
in_filters, out_filters, use_norm, last_layer=last_layer))
self.pfn_layers = nn.ModuleList(pfn_layers)
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.x_offset = self.vx / 2 + pc_range[0]
self.y_offset = self.vy / 2 + pc_range[1]
|
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
def __init__(self,
num_input_features=4,
use_norm=True,
num_filters=(64, ),
with_distance=False,
voxel_size=(0.2, 0.2, 4),
pc_range=(0, -40, -3, 70.4, 40, 1)):
"""
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
"""
super().__init__()
self.name = 'PillarFeatureNetOld'
assert len(num_filters) > 0
num_input_features += 5
if with_distance:
num_input_features += 1
self._with_distance = with_distance
# Create PillarFeatureNetOld layers
num_filters = [num_input_features] + list(num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
if i < len(num_filters) - 2:
last_layer = False
else:
last_layer = True
pfn_layers.append(
PFNLayer(
in_filters, out_filters, use_norm, last_layer=last_layer))
self.pfn_layers = nn.ModuleList(pfn_layers)
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.x_offset = self.vx / 2 + pc_range[0]
self.y_offset = self.vy / 2 + pc_range[1]
|
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
def __init__(self,
num_input_features=4,
use_norm=True,
num_filters=(64, ),
with_distance=False,
voxel_size=(0.2, 0.2, 4),
pc_range=(0, -40, -3, 70.4, 40, 1)):
"""
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
"""
super().__init__()
self.name = 'PillarFeatureNetRadius'
assert len(num_filters) > 0
num_input_features += 5
num_input_features -= 1 # radius xy->r, z->z
if with_distance:
num_input_features += 1
self._with_distance = with_distance
# Create PillarFeatureNetOld layers
num_filters = [num_input_features] + list(num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
if i < len(num_filters) - 2:
last_layer = False
else:
last_layer = True
pfn_layers.append(
PFNLayer(
in_filters, out_filters, use_norm, last_layer=last_layer))
self.pfn_layers = nn.ModuleList(pfn_layers)
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.x_offset = self.vx / 2 + pc_range[0]
self.y_offset = self.vy / 2 + pc_range[1]
|
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
def __init__(self,
num_input_features=4,
use_norm=True,
num_filters=(64, ),
with_distance=False,
voxel_size=(0.2, 0.2, 4),
pc_range=(0, -40, -3, 70.4, 40, 1)):
"""
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
"""
super().__init__()
self.name = 'PillarFeatureNetRadiusHeight'
assert len(num_filters) > 0
num_input_features += 6
num_input_features -= 1 # radius xy->r, z->z
if with_distance:
num_input_features += 1
self._with_distance = with_distance
# Create PillarFeatureNetOld layers
num_filters = [num_input_features] + list(num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
if i < len(num_filters) - 2:
last_layer = False
else:
last_layer = True
pfn_layers.append(
PFNLayer(
in_filters, out_filters, use_norm, last_layer=last_layer))
self.pfn_layers = nn.ModuleList(pfn_layers)
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.x_offset = self.vx / 2 + pc_range[0]
self.y_offset = self.vy / 2 + pc_range[1]
|
Pillar Feature Net.
The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
:param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
:param use_norm: <bool>. Whether to include BatchNorm.
:param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
:param with_distance: <bool>. Whether to include Euclidean distance to points.
:param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
:param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
def __init__(self,
output_shape,
use_norm=True,
num_input_features=64,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddle2K'):
"""
Point Pillar's Scatter.
Converts learned features from dense tensor to sparse pseudo image. This replaces SECOND's
second.pytorch.voxelnet.SparseMiddleExtractor.
:param output_shape: ([int]: 4). Required output shape of features.
:param num_input_features: <int>. Number of input features.
"""
super().__init__()
self.name = 'PointPillarsScatter'
self.output_shape = output_shape
self.ny = output_shape[2]
self.nx = output_shape[3]
self.nchannels = num_input_features
|
Point Pillar's Scatter.
Converts learned features from dense tensor to sparse pseudo image. This replaces SECOND's
second.pytorch.voxelnet.SparseMiddleExtractor.
:param output_shape: ([int]: 4). Required output shape of features.
:param num_input_features: <int>. Number of input features.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/pointpillars.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/pointpillars.py
|
MIT
|
def __init__(self,
use_norm=True,
num_class=2,
layer_nums=(3, 5, 5),
layer_strides=(2, 2, 2),
num_filters=(128, 128, 256),
upsample_strides=(1, 2, 4),
num_upsample_filters=(256, 256, 256),
num_input_features=128,
num_anchor_per_loc=2,
encode_background_as_zeros=True,
use_direction_classifier=True,
use_groupnorm=False,
num_groups=32,
box_code_size=7,
num_direction_bins=2,
name='rpn'):
"""deprecated. exists for checkpoint backward compilability (SECOND v1.0)
"""
super(RPN, self).__init__()
self._num_anchor_per_loc = num_anchor_per_loc
self._use_direction_classifier = use_direction_classifier
assert len(layer_nums) == 3
assert len(layer_strides) == len(layer_nums)
assert len(num_filters) == len(layer_nums)
assert len(upsample_strides) == len(layer_nums)
assert len(num_upsample_filters) == len(layer_nums)
upsample_strides = [
np.round(u).astype(np.int64) for u in upsample_strides
]
factors = []
for i in range(len(layer_nums)):
assert int(np.prod(
layer_strides[:i + 1])) % upsample_strides[i] == 0
factors.append(
np.prod(layer_strides[:i + 1]) // upsample_strides[i])
assert all([x == factors[0] for x in factors])
if use_norm:
if use_groupnorm:
BatchNorm2d = change_default_args(
num_groups=num_groups, eps=1e-3)(GroupNorm)
else:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
# note that when stride > 1, conv2d with same padding isn't
# equal to pad-conv2d. we should use pad-conv2d.
block2_input_filters = num_filters[0]
self.block1 = Sequential(
nn.ZeroPad2d(1),
Conv2d(
num_input_features, num_filters[0], 3,
stride=layer_strides[0]),
BatchNorm2d(num_filters[0]),
nn.ReLU(),
)
for i in range(layer_nums[0]):
self.block1.add(
Conv2d(num_filters[0], num_filters[0], 3, padding=1))
self.block1.add(BatchNorm2d(num_filters[0]))
self.block1.add(nn.ReLU())
self.deconv1 = Sequential(
ConvTranspose2d(
num_filters[0],
num_upsample_filters[0],
upsample_strides[0],
stride=upsample_strides[0]),
BatchNorm2d(num_upsample_filters[0]),
nn.ReLU(),
)
self.block2 = Sequential(
nn.ZeroPad2d(1),
Conv2d(
block2_input_filters,
num_filters[1],
3,
stride=layer_strides[1]),
BatchNorm2d(num_filters[1]),
nn.ReLU(),
)
for i in range(layer_nums[1]):
self.block2.add(
Conv2d(num_filters[1], num_filters[1], 3, padding=1))
self.block2.add(BatchNorm2d(num_filters[1]))
self.block2.add(nn.ReLU())
self.deconv2 = Sequential(
ConvTranspose2d(
num_filters[1],
num_upsample_filters[1],
upsample_strides[1],
stride=upsample_strides[1]),
BatchNorm2d(num_upsample_filters[1]),
nn.ReLU(),
)
self.block3 = Sequential(
nn.ZeroPad2d(1),
Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2]),
BatchNorm2d(num_filters[2]),
nn.ReLU(),
)
for i in range(layer_nums[2]):
self.block3.add(
Conv2d(num_filters[2], num_filters[2], 3, padding=1))
self.block3.add(BatchNorm2d(num_filters[2]))
self.block3.add(nn.ReLU())
self.deconv3 = Sequential(
ConvTranspose2d(
num_filters[2],
num_upsample_filters[2],
upsample_strides[2],
stride=upsample_strides[2]),
BatchNorm2d(num_upsample_filters[2]),
nn.ReLU(),
)
if encode_background_as_zeros:
num_cls = num_anchor_per_loc * num_class
else:
num_cls = num_anchor_per_loc * (num_class + 1)
self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)
self.conv_box = nn.Conv2d(
sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)
if use_direction_classifier:
self.conv_dir_cls = nn.Conv2d(
sum(num_upsample_filters),
num_anchor_per_loc * num_direction_bins, 1)
if self._use_rc_net:
self.conv_rc = nn.Conv2d(
sum(num_upsample_filters), num_anchor_per_loc * box_code_size,
1)
|
deprecated. exists for checkpoint backward compatibility (SECOND v1.0)
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/rpn.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/rpn.py
|
MIT
|
def __init__(self,
use_norm=True,
num_class=2,
layer_nums=(3, 5, 5),
layer_strides=(2, 2, 2),
num_filters=(128, 128, 256),
upsample_strides=(1, 2, 4),
num_upsample_filters=(256, 256, 256),
num_input_features=128,
num_anchor_per_loc=2,
encode_background_as_zeros=True,
use_direction_classifier=True,
use_groupnorm=False,
num_groups=32,
box_code_size=7,
num_direction_bins=2,
name='rpn'):
"""upsample_strides support float: [0.25, 0.5, 1]
if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
"""
super(RPNNoHeadBase, self).__init__()
self._layer_strides = layer_strides
self._num_filters = num_filters
self._layer_nums = layer_nums
self._upsample_strides = upsample_strides
self._num_upsample_filters = num_upsample_filters
self._num_input_features = num_input_features
self._use_norm = use_norm
self._use_groupnorm = use_groupnorm
self._num_groups = num_groups
assert len(layer_strides) == len(layer_nums)
assert len(num_filters) == len(layer_nums)
assert len(num_upsample_filters) == len(upsample_strides)
self._upsample_start_idx = len(layer_nums) - len(upsample_strides)
must_equal_list = []
for i in range(len(upsample_strides)):
must_equal_list.append(upsample_strides[i] / np.prod(
layer_strides[:i + self._upsample_start_idx + 1]))
for val in must_equal_list:
assert val == must_equal_list[0]
if use_norm:
if use_groupnorm:
BatchNorm2d = change_default_args(
num_groups=num_groups, eps=1e-3)(GroupNorm)
else:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
in_filters = [num_input_features, *num_filters[:-1]]
blocks = []
deblocks = []
for i, layer_num in enumerate(layer_nums):
block, num_out_filters = self._make_layer(
in_filters[i],
num_filters[i],
layer_num,
stride=layer_strides[i])
blocks.append(block)
if i - self._upsample_start_idx >= 0:
stride = upsample_strides[i - self._upsample_start_idx]
if stride >= 1:
stride = np.round(stride).astype(np.int64)
deblock = nn.Sequential(
ConvTranspose2d(
num_out_filters,
num_upsample_filters[i - self._upsample_start_idx],
stride,
stride=stride),
BatchNorm2d(
num_upsample_filters[i -
self._upsample_start_idx]),
nn.ReLU(),
)
else:
stride = np.round(1 / stride).astype(np.int64)
deblock = nn.Sequential(
Conv2d(
num_out_filters,
num_upsample_filters[i - self._upsample_start_idx],
stride,
stride=stride),
BatchNorm2d(
num_upsample_filters[i -
self._upsample_start_idx]),
nn.ReLU(),
)
deblocks.append(deblock)
self._num_out_filters = num_out_filters
self.blocks = nn.ModuleList(blocks)
self.deblocks = nn.ModuleList(deblocks)
|
upsample_strides support float: [0.25, 0.5, 1]
if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/rpn.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/rpn.py
|
MIT
|
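In `RPNNoHeadBase` above, each upsample stage becomes a `ConvTranspose2d` when its configured stride is >= 1 and a strided `Conv2d` (further downsampling) when the stride is a fraction such as 0.5. A small sketch of that branch in isolation; `make_deblock` is a hypothetical helper and plain `torch.nn` layers stand in for the repo's `change_default_args`-wrapped ones:

```python
import numpy as np
import torch
import torch.nn as nn

def make_deblock(in_ch, out_ch, upsample_stride):
    """ConvTranspose2d for stride >= 1, strided Conv2d for fractional strides."""
    if upsample_stride >= 1:
        stride = int(np.round(upsample_stride))
        conv = nn.ConvTranspose2d(in_ch, out_ch, stride, stride=stride)
    else:
        stride = int(np.round(1 / upsample_stride))
        conv = nn.Conv2d(in_ch, out_ch, stride, stride=stride)
    return nn.Sequential(conv,
                         nn.BatchNorm2d(out_ch, eps=1e-3, momentum=0.01),
                         nn.ReLU())

x = torch.randn(1, 128, 50, 50)
print(make_deblock(128, 128, 2)(x).shape)     # upsampled:   [1, 128, 100, 100]
print(make_deblock(128, 128, 0.5)(x).shape)   # downsampled: [1, 128, 25, 25]
```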
def __init__(self,
use_norm=True,
num_class=2,
layer_nums=(3, 5, 5),
layer_strides=(2, 2, 2),
num_filters=(128, 128, 256),
upsample_strides=(1, 2, 4),
num_upsample_filters=(256, 256, 256),
num_input_features=128,
num_anchor_per_loc=2,
encode_background_as_zeros=True,
use_direction_classifier=True,
use_groupnorm=False,
num_groups=32,
box_code_size=7,
num_direction_bins=2,
name='rpn'):
"""upsample_strides support float: [0.25, 0.5, 1]
if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
"""
super(RPNBase, self).__init__(
use_norm=use_norm,
num_class=num_class,
layer_nums=layer_nums,
layer_strides=layer_strides,
num_filters=num_filters,
upsample_strides=upsample_strides,
num_upsample_filters=num_upsample_filters,
num_input_features=num_input_features,
num_anchor_per_loc=num_anchor_per_loc,
encode_background_as_zeros=encode_background_as_zeros,
use_direction_classifier=use_direction_classifier,
use_groupnorm=use_groupnorm,
num_groups=num_groups,
box_code_size=box_code_size,
num_direction_bins=num_direction_bins,
name=name)
self._num_anchor_per_loc = num_anchor_per_loc
self._num_direction_bins = num_direction_bins
self._num_class = num_class
self._use_direction_classifier = use_direction_classifier
self._box_code_size = box_code_size
if encode_background_as_zeros:
num_cls = num_anchor_per_loc * num_class
else:
num_cls = num_anchor_per_loc * (num_class + 1)
if len(num_upsample_filters) == 0:
final_num_filters = self._num_out_filters
else:
final_num_filters = sum(num_upsample_filters)
self.conv_cls = nn.Conv2d(final_num_filters, num_cls, 1)
self.conv_box = nn.Conv2d(final_num_filters,
num_anchor_per_loc * box_code_size, 1)
if use_direction_classifier:
self.conv_dir_cls = nn.Conv2d(
final_num_filters, num_anchor_per_loc * num_direction_bins, 1)
|
upsample_strides supports floats, e.g. [0.25, 0.5, 1].
If an upsample stride is < 1, Conv2d is used instead of ConvTranspose2d.
|
__init__
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/rpn.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/rpn.py
|
MIT
|
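For orientation, the 1x1 head sizes implied by the default arguments of RPNBase.__init__ above work out as follows (plain arithmetic, no model is built):
num_anchor_per_loc, num_class = 2, 2
box_code_size, num_direction_bins = 7, 2
encode_background_as_zeros = True
num_upsample_filters = (256, 256, 256)

num_cls = num_anchor_per_loc * (num_class if encode_background_as_zeros else num_class + 1)
final_num_filters = sum(num_upsample_filters)      # 768 channels feed each 1x1 head
print(final_num_filters, num_cls)                  # 768 4  -> conv_cls output channels
print(num_anchor_per_loc * box_code_size)          # 14     -> conv_box output channels
print(num_anchor_per_loc * num_direction_bins)     # 4      -> conv_dir_cls output channels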
def network_forward(self, voxels, num_points, coors, batch_size):
"""this function is used for subclass.
you can add custom network architecture by subclass VoxelNet class
and override this function.
Returns:
preds_dict: {
box_preds: ...
cls_preds: ...
dir_cls_preds: ...
}
"""
self.start_timer("voxel_feature_extractor")
voxel_features = self.voxel_feature_extractor(voxels, num_points,
coors)
self.end_timer("voxel_feature_extractor")
self.start_timer("middle forward")
spatial_features = self.middle_feature_extractor(
voxel_features, coors, batch_size)
self.end_timer("middle forward")
self.start_timer("rpn forward")
preds_dict = self.rpn(spatial_features)
self.end_timer("rpn forward")
return preds_dict
|
This function is meant to be overridden in subclasses.
You can add a custom network architecture by subclassing VoxelNet
and overriding this function.
Returns:
preds_dict: {
box_preds: ...
cls_preds: ...
dir_cls_preds: ...
}
|
network_forward
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/voxelnet.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/voxelnet.py
|
MIT
|
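The docstring above invites overriding this method; a minimal sketch of what such a subclass could look like, assuming VoxelNet and its feature-extractor attributes from voxelnet.py are importable (the subclass itself is hypothetical):
class CustomVoxelNet(VoxelNet):
    def network_forward(self, voxels, num_points, coors, batch_size):
        # Same three-stage pipeline as the base class, with room for extra layers.
        voxel_features = self.voxel_feature_extractor(voxels, num_points, coors)
        spatial_features = self.middle_feature_extractor(voxel_features, coors, batch_size)
        # ... custom processing of spatial_features could go here ...
        preds_dict = self.rpn(spatial_features)
        return preds_dict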
def forward(self, example):
"""module's forward should always accept dict and return loss.
"""
voxels = example["voxels"]
num_points = example["num_points"]
coors = example["coordinates"]
if len(num_points.shape) == 2: # multi-gpu
num_voxel_per_batch = example["num_voxels"].cpu().numpy().reshape(
-1)
voxel_list = []
num_points_list = []
coors_list = []
for i, num_voxel in enumerate(num_voxel_per_batch):
voxel_list.append(voxels[i, :num_voxel])
num_points_list.append(num_points[i, :num_voxel])
coors_list.append(coors[i, :num_voxel])
voxels = torch.cat(voxel_list, dim=0)
num_points = torch.cat(num_points_list, dim=0)
coors = torch.cat(coors_list, dim=0)
batch_anchors = example["anchors"]
batch_size_dev = batch_anchors.shape[0]
# features: [num_voxels, max_num_points_per_voxel, 7]
# num_points: [num_voxels]
# coors: [num_voxels, 4]
preds_dict = self.network_forward(voxels, num_points, coors, batch_size_dev)
# need to check size.
box_preds = preds_dict["box_preds"].view(batch_size_dev, -1, self._box_coder.code_size)
err_msg = f"num_anchors={batch_anchors.shape[1]}, but num_output={box_preds.shape[1]}. please check size"
assert batch_anchors.shape[1] == box_preds.shape[1], err_msg
if self.training:
return self.loss(example, preds_dict)
else:
self.start_timer("predict")
with torch.no_grad():
res = self.predict(example, preds_dict)
self.end_timer("predict")
return res
|
The module's forward should always accept a dict and return the loss.
|
forward
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/voxelnet.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/voxelnet.py
|
MIT
|
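For reference, a single-GPU example dict with the keys that forward() reads above; the shapes are placeholders, only the key names come from the code:
import torch

num_voxels, max_points_per_voxel, point_ndim = 1000, 5, 4
example = {
    "voxels": torch.zeros(num_voxels, max_points_per_voxel, point_ndim),
    "num_points": torch.ones(num_voxels, dtype=torch.int32),
    "coordinates": torch.zeros(num_voxels, 4, dtype=torch.int32),
    "anchors": torch.zeros(1, 70400, 7),  # [batch_size, num_anchors, box_code_size]
}
# net(example) returns the loss dict in training mode and a list of
# prediction dicts (see predict() below) in eval mode.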
def predict(self, example, preds_dict):
"""start with v1.6.0, this function don't contain any kitti-specific code.
Returns:
predict: list of pred_dict.
pred_dict: {
box3d_lidar: [N, 7] 3d box.
scores: [N]
label_preds: [N]
                metadata: meta-data that contains dataset-specific information.
                    For KITTI it contains the image idx (label idx);
                    for nuScenes the sample_token is saved in it.
}
"""
batch_size = example['anchors'].shape[0]
if "metadata" not in example or len(example["metadata"]) == 0:
meta_list = [None] * batch_size
else:
meta_list = example["metadata"]
batch_anchors = example["anchors"].view(batch_size, -1,
example["anchors"].shape[-1])
if "anchors_mask" not in example:
batch_anchors_mask = [None] * batch_size
else:
batch_anchors_mask = example["anchors_mask"].view(batch_size, -1)
t = time.time()
batch_box_preds = preds_dict["box_preds"]
batch_cls_preds = preds_dict["cls_preds"]
batch_box_preds = batch_box_preds.view(batch_size, -1,
self._box_coder.code_size)
num_class_with_bg = self._num_class
if not self._encode_background_as_zeros:
num_class_with_bg = self._num_class + 1
batch_cls_preds = batch_cls_preds.view(batch_size, -1,
num_class_with_bg)
batch_box_preds = self._box_coder.decode_torch(batch_box_preds,
batch_anchors)
if self._use_direction_classifier:
batch_dir_preds = preds_dict["dir_cls_preds"]
batch_dir_preds = batch_dir_preds.view(batch_size, -1,
self._num_direction_bins)
else:
batch_dir_preds = [None] * batch_size
predictions_dicts = []
post_center_range = None
if len(self._post_center_range) > 0:
post_center_range = torch.tensor(
self._post_center_range,
dtype=batch_box_preds.dtype,
device=batch_box_preds.device).float()
for box_preds, cls_preds, dir_preds, a_mask, meta in zip(
batch_box_preds, batch_cls_preds, batch_dir_preds,
batch_anchors_mask, meta_list):
if a_mask is not None:
box_preds = box_preds[a_mask]
cls_preds = cls_preds[a_mask]
box_preds = box_preds.float()
cls_preds = cls_preds.float()
if self._use_direction_classifier:
if a_mask is not None:
dir_preds = dir_preds[a_mask]
dir_labels = torch.max(dir_preds, dim=-1)[1]
if self._encode_background_as_zeros:
                # this doesn't support softmax
assert self._use_sigmoid_score is True
total_scores = torch.sigmoid(cls_preds)
else:
# encode background as first element in one-hot vector
if self._use_sigmoid_score:
total_scores = torch.sigmoid(cls_preds)[..., 1:]
else:
total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
# Apply NMS in birdeye view
if self._use_rotate_nms:
nms_func = box_torch_ops.rotate_nms
else:
nms_func = box_torch_ops.nms
feature_map_size_prod = batch_box_preds.shape[
1] // self.target_assigner.num_anchors_per_location
if self._multiclass_nms:
assert self._encode_background_as_zeros is True
boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
if not self._use_rotate_nms:
box_preds_corners = box_torch_ops.center_to_corner_box2d(
boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
boxes_for_nms[:, 4])
boxes_for_nms = box_torch_ops.corner_to_standup_nd(
box_preds_corners)
selected_boxes, selected_labels, selected_scores = [], [], []
selected_dir_labels = []
scores = total_scores
boxes = boxes_for_nms
selected_per_class = []
score_threshs = self._nms_score_thresholds
pre_max_sizes = self._nms_pre_max_sizes
post_max_sizes = self._nms_post_max_sizes
iou_thresholds = self._nms_iou_thresholds
for class_idx, score_thresh, pre_ms, post_ms, iou_th in zip(
range(self._num_class),
score_threshs,
pre_max_sizes, post_max_sizes, iou_thresholds):
if self._nms_class_agnostic:
class_scores = total_scores.view(
feature_map_size_prod, -1,
self._num_class)[..., class_idx]
class_scores = class_scores.contiguous().view(-1)
class_boxes_nms = boxes.view(-1,
boxes_for_nms.shape[-1])
class_boxes = box_preds
class_dir_labels = dir_labels
else:
anchors_range = self.target_assigner.anchors_range(class_idx)
class_scores = total_scores.view(
-1,
self._num_class)[anchors_range[0]:anchors_range[1], class_idx]
class_boxes_nms = boxes.view(-1,
boxes_for_nms.shape[-1])[anchors_range[0]:anchors_range[1], :]
class_scores = class_scores.contiguous().view(-1)
class_boxes_nms = class_boxes_nms.contiguous().view(
-1, boxes_for_nms.shape[-1])
class_boxes = box_preds.view(-1,
box_preds.shape[-1])[anchors_range[0]:anchors_range[1], :]
class_boxes = class_boxes.contiguous().view(
-1, box_preds.shape[-1])
if self._use_direction_classifier:
class_dir_labels = dir_labels.view(-1)[anchors_range[0]:anchors_range[1]]
class_dir_labels = class_dir_labels.contiguous(
).view(-1)
if score_thresh > 0.0:
class_scores_keep = class_scores >= score_thresh
if class_scores_keep.shape[0] == 0:
selected_per_class.append(None)
continue
class_scores = class_scores[class_scores_keep]
if class_scores.shape[0] != 0:
if score_thresh > 0.0:
class_boxes_nms = class_boxes_nms[
class_scores_keep]
class_boxes = class_boxes[class_scores_keep]
class_dir_labels = class_dir_labels[
class_scores_keep]
keep = nms_func(class_boxes_nms, class_scores, pre_ms,
post_ms, iou_th)
if keep.shape[0] != 0:
selected_per_class.append(keep)
else:
selected_per_class.append(None)
else:
selected_per_class.append(None)
selected = selected_per_class[-1]
if selected is not None:
selected_boxes.append(class_boxes[selected])
selected_labels.append(
torch.full([class_boxes[selected].shape[0]],
class_idx,
dtype=torch.int64,
device=box_preds.device))
if self._use_direction_classifier:
selected_dir_labels.append(
class_dir_labels[selected])
selected_scores.append(class_scores[selected])
selected_boxes = torch.cat(selected_boxes, dim=0)
selected_labels = torch.cat(selected_labels, dim=0)
selected_scores = torch.cat(selected_scores, dim=0)
if self._use_direction_classifier:
selected_dir_labels = torch.cat(selected_dir_labels, dim=0)
else:
                # get highest score per prediction, then apply nms
# to remove overlapped box.
if num_class_with_bg == 1:
top_scores = total_scores.squeeze(-1)
top_labels = torch.zeros(
total_scores.shape[0],
device=total_scores.device,
dtype=torch.long)
else:
top_scores, top_labels = torch.max(
total_scores, dim=-1)
if self._nms_score_thresholds[0] > 0.0:
top_scores_keep = top_scores >= self._nms_score_thresholds[0]
top_scores = top_scores.masked_select(top_scores_keep)
if top_scores.shape[0] != 0:
if self._nms_score_thresholds[0] > 0.0:
box_preds = box_preds[top_scores_keep]
if self._use_direction_classifier:
dir_labels = dir_labels[top_scores_keep]
top_labels = top_labels[top_scores_keep]
boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
if not self._use_rotate_nms:
box_preds_corners = box_torch_ops.center_to_corner_box2d(
boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
boxes_for_nms[:, 4])
boxes_for_nms = box_torch_ops.corner_to_standup_nd(
box_preds_corners)
# the nms in 3d detection just remove overlap boxes.
selected = nms_func(
boxes_for_nms,
top_scores,
pre_max_size=self._nms_pre_max_sizes[0],
post_max_size=self._nms_post_max_sizes[0],
iou_threshold=self._nms_iou_thresholds[0],
)
else:
selected = []
# if selected is not None:
selected_boxes = box_preds[selected]
if self._use_direction_classifier:
selected_dir_labels = dir_labels[selected]
selected_labels = top_labels[selected]
selected_scores = top_scores[selected]
# finally generate predictions.
if selected_boxes.shape[0] != 0:
box_preds = selected_boxes
scores = selected_scores
label_preds = selected_labels
if self._use_direction_classifier:
dir_labels = selected_dir_labels
period = (2 * np.pi / self._num_direction_bins)
dir_rot = box_torch_ops.limit_period(
box_preds[..., 6] - self._dir_offset,
self._dir_limit_offset, period)
box_preds[
...,
6] = dir_rot + self._dir_offset + period * dir_labels.to(
box_preds.dtype)
final_box_preds = box_preds
final_scores = scores
final_labels = label_preds
if post_center_range is not None:
mask = (final_box_preds[:, :3] >=
post_center_range[:3]).all(1)
mask &= (final_box_preds[:, :3] <=
post_center_range[3:]).all(1)
predictions_dict = {
"box3d_lidar": final_box_preds[mask],
"scores": final_scores[mask],
"label_preds": label_preds[mask],
"metadata": meta,
}
else:
predictions_dict = {
"box3d_lidar": final_box_preds,
"scores": final_scores,
"label_preds": label_preds,
"metadata": meta,
}
else:
dtype = batch_box_preds.dtype
device = batch_box_preds.device
predictions_dict = {
"box3d_lidar":
torch.zeros([0, box_preds.shape[-1]],
dtype=dtype,
device=device),
"scores":
torch.zeros([0], dtype=dtype, device=device),
"label_preds":
torch.zeros([0], dtype=top_labels.dtype, device=device),
"metadata":
meta,
}
predictions_dicts.append(predictions_dict)
return predictions_dicts
|
Starting with v1.6.0, this function doesn't contain any KITTI-specific code.
Returns:
predict: list of pred_dict.
pred_dict: {
box3d_lidar: [N, 7] 3d box.
scores: [N]
label_preds: [N]
    metadata: meta-data that contains dataset-specific information.
        For KITTI it contains the image idx (label idx);
        for nuScenes the sample_token is saved in it.
}
|
predict
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/voxelnet.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/voxelnet.py
|
MIT
|
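A small numeric sketch of the direction decoding near the end of predict(). limit_period is reimplemented locally here and assumed to behave like val - floor(val / period + offset) * period, matching how it is called above; all values are made up:
import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    # local stand-in for box_torch_ops.limit_period (assumed behaviour)
    return val - np.floor(val / period + offset) * period

num_direction_bins = 2
dir_offset, dir_limit_offset = 0.0, 1.0          # illustrative values
period = 2 * np.pi / num_direction_bins          # pi for two bins

yaw_pred = 2.5                                    # raw regressed yaw (radians)
dir_label = 1                                     # argmax of the direction classifier
dir_rot = limit_period(yaw_pred - dir_offset, dir_limit_offset, period)
final_yaw = dir_rot + dir_offset + period * dir_label
print(round(dir_rot, 4), round(final_yaw, 4))     # -0.6416 2.5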
def convert_norm_to_float(net):
'''
    Convert BatchNorm layers to have parameters in single precision.
    Find all layers and convert them back to float. This can't
    be done with the built-in .apply, as that function would apply
    fn to all modules, parameters, and buffers, so we couldn't
    guard the float conversion based on the module type.
'''
if isinstance(net, torch.nn.modules.batchnorm._BatchNorm):
net.float()
for child in net.children():
VoxelNet.convert_norm_to_float(child)
return net
|
Convert BatchNorm layers to have parameters in single precision.
Find all layers and convert them back to float. This can't
be done with the built-in .apply, as that function would apply
fn to all modules, parameters, and buffers, so we couldn't
guard the float conversion based on the module type.
|
convert_norm_to_float
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/voxelnet.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/voxelnet.py
|
MIT
|
def get_paddings_indicator(actual_num, max_num, axis=0):
"""Create boolean mask by actually number of a padded tensor.
Args:
actual_num ([type]): [description]
max_num ([type]): [description]
Returns:
[type]: [description]
"""
actual_num = torch.unsqueeze(actual_num, axis + 1)
# tiled_actual_num: [N, M, 1]
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(
max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
# tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
# tiled_max_num: [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
paddings_indicator = actual_num.int() > max_num
# paddings_indicator shape: [batch_size, max_num]
return paddings_indicator
|
Create a boolean mask from the actual number of valid entries in a padded tensor.
Args:
    actual_num (torch.Tensor): [N] number of valid entries per sample.
    max_num (int): padded size along the given axis.
Returns:
    torch.Tensor: [N, max_num] boolean mask, True for valid (non-padded) entries.
|
get_paddings_indicator
|
python
|
traveller59/second.pytorch
|
second/pytorch/models/voxel_encoder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/models/voxel_encoder.py
|
MIT
|
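A tiny runnable check of the mask built above (it assumes get_paddings_indicator from voxel_encoder.py is in scope); the numbers mirror the inline comments in the code:
import torch

actual_num = torch.tensor([3, 4, 2])   # valid points per padded voxel
mask = get_paddings_indicator(actual_num, max_num=5)
print(mask.int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 0, 0, 0]], dtype=torch.int32)
# True marks real (non-padded) entries; ~mask selects the padding slots.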
def box3d_overlap_kernel(boxes,
qboxes,
rinc,
criterion=-1,
z_axis=1,
z_center=1.0):
"""
z_axis: the z (height) axis.
z_center: unified z (height) center of box.
"""
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
min_z = min(
boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
max_z = max(
boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
iw = min_z - max_z
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = 1.0
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
|
z_axis: the z (height) axis.
z_center: unified z (height) center of box.
|
box3d_overlap_kernel
|
python
|
traveller59/second.pytorch
|
second/utils/eval.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/eval.py
|
MIT
|
def calculate_iou_partly(gt_annos,
dt_annos,
metric,
num_parts=50,
z_axis=1,
z_center=1.0):
"""fast iou algorithm. this function can be used independently to
do result analysis.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
z_axis: height axis. kitti camera use 1, lidar use 2.
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
bev_axes = list(range(3))
bev_axes.pop(z_axis)
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, bev_axes] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, bev_axes] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
loc = np.concatenate(
[a["location"][:, bev_axes] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, bev_axes] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
overlap_part = bev_box_overlap(gt_boxes,
dt_boxes).astype(np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
overlap_part = box3d_overlap(
gt_boxes, dt_boxes, z_axis=z_axis,
z_center=z_center).astype(np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx +
gt_box_num, dt_num_idx:dt_num_idx +
dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num
|
Fast IoU algorithm. This function can be used independently to
do result analysis.
Args:
    gt_annos: dict, must come from get_label_annos() in kitti_common.py
    dt_annos: dict, must come from get_label_annos() in kitti_common.py
    metric: eval type. 0: bbox, 1: bev, 2: 3d
    num_parts: int. a parameter for the fast-calculation algorithm
    z_axis: height axis. KITTI camera coordinates use 1, lidar uses 2.
|
calculate_iou_partly
|
python
|
traveller59/second.pytorch
|
second/utils/eval.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/eval.py
|
MIT
|
def eval_class_v3(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
z_axis=1,
z_center=1.0,
num_parts=50):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_class: int, 0: car, 1: pedestrian, 2: cyclist
difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlap: float, min overlap. official:
[[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
format: [metric, class]. choose one from matrix above.
        num_parts: int. a parameter for the fast-calculation algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(
dt_annos,
gt_annos,
metric,
num_parts,
z_axis=z_axis,
z_center=z_center)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
all_thresholds = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
tp, fp, fn, similarity, thresholds = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
# print(thresholds)
all_thresholds[m, l, k, :len(thresholds)] = thresholds
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
# recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
# recall[m, l, k, i] = np.max(recall[m, l, k, :i + 1], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
# use interp to calculate recall
"""
current_recalls = np.linspace(0, 1, 41)
prec_unique, inds = np.unique(precision[m, l, k], return_index=True)
current_recalls = current_recalls[inds]
f = interp1d(prec_unique, current_recalls)
precs_for_recall = np.linspace(0, 1, 41)
max_prec = np.max(precision[m, l, k])
valid_prec = precs_for_recall < max_prec
num_valid_prec = valid_prec.sum()
recall[m, l, k, :num_valid_prec] = f(precs_for_recall[valid_prec])
"""
ret_dict = {
"recall": recall, # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
"precision": precision,
"orientation": aos,
"thresholds": all_thresholds,
"min_overlaps": min_overlaps,
}
return ret_dict
|
KITTI eval. Supports 2d/bev/3d/aos eval and 0.5:0.05:0.95 COCO AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_class: int, 0: car, 1: pedestrian, 2: cyclist
difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlap: float, min overlap. official:
[[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
format: [metric, class]. choose one from matrix above.
    num_parts: int. a parameter for the fast-calculation algorithm
Returns:
dict of recall, precision and aos
|
eval_class_v3
|
python
|
traveller59/second.pytorch
|
second/utils/eval.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/eval.py
|
MIT
|
def get_official_eval_result(gt_annos,
dt_annos,
current_classes,
difficultys=[0, 1, 2],
z_axis=1,
z_center=1.0):
"""
    gt_annos and dt_annos must contain the following keys:
[bbox, location, dimensions, rotation_y, score]
"""
overlap_mod = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]])
overlap_easy = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5]])
min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0) # [2, 3, 5]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'car',
6: 'tractor',
7: 'trailer',
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
metrics = do_eval_v3(
gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos,
difficultys,
z_axis=z_axis,
z_center=z_center)
detail = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
class_name = class_to_name[curcls]
detail[class_name] = {}
for i in range(min_overlaps.shape[0]):
mAPbbox = get_mAP(metrics["bbox"]["precision"][j, :, i])
mAPbev = get_mAP(metrics["bev"]["precision"][j, :, i])
mAP3d = get_mAP(metrics["3d"]["precision"][j, :, i])
detail[class_name][f"bbox@{min_overlaps[i, 0, j]:.2f}"] = mAPbbox.tolist()
detail[class_name][f"bev@{min_overlaps[i, 1, j]:.2f}"] = mAPbev.tolist()
detail[class_name][f"3d@{min_overlaps[i, 2, j]:.2f}"] = mAP3d.tolist()
result += print_str(
(f"{class_to_name[curcls]} "
"AP(Average Precision)@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
mAPbbox = ", ".join(f"{v:.2f}" for v in mAPbbox)
mAPbev = ", ".join(f"{v:.2f}" for v in mAPbev)
mAP3d = ", ".join(f"{v:.2f}" for v in mAP3d)
result += print_str(f"bbox AP:{mAPbbox}")
result += print_str(f"bev AP:{mAPbev}")
result += print_str(f"3d AP:{mAP3d}")
if compute_aos:
mAPaos = get_mAP(metrics["bbox"]["orientation"][j, :, i])
detail[class_name][f"aos"] = mAPaos.tolist()
mAPaos = ", ".join(f"{v:.2f}" for v in mAPaos)
result += print_str(f"aos AP:{mAPaos}")
return {
"result": result,
"detail": detail,
}
|
gt_annos and dt_annos must contain the following keys:
[bbox, location, dimensions, rotation_y, score]
|
get_official_eval_result
|
python
|
traveller59/second.pytorch
|
second/utils/eval.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/eval.py
|
MIT
|
def flat_nested_json_dict(json_dict, sep=".") -> dict:
"""flat a nested json-like dict. this function make shadow copy.
"""
flatted = {}
for k, v in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, str(k))
else:
flatted[str(k)] = v
return flatted
|
Flatten a nested json-like dict. This function makes a shallow copy.
|
flat_nested_json_dict
|
python
|
traveller59/second.pytorch
|
second/utils/log_tool.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/log_tool.py
|
MIT
|
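An illustrative call of flat_nested_json_dict above. The nested helper _flat_nested_json_dict lives in the same file and is assumed to join nested keys with sep, so the printed result below is the expected shape of the output, not a verified transcript:
config = {"model": {"rpn": {"num_filters": [128, 128, 256]}}, "steps": 100}
print(flat_nested_json_dict(config))
# expected: {'model.rpn.num_filters': [128, 128, 256], 'steps': 100}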
def log_text(self, text, step, tag="regular log"):
"""This function only add text to log.txt and tensorboard texts
"""
print(text)
print(text, file=self.log_file)
if step > self._text_current_gstep and self._text_current_gstep != -1:
total_text = '\n'.join(self._tb_texts)
self.summary_writter.add_text(tag, total_text, global_step=step)
self._tb_texts = []
self._text_current_gstep = step
else:
self._tb_texts.append(text)
if self._text_current_gstep == -1:
self._text_current_gstep = step
|
This function only adds text to log.txt and the tensorboard text log.
|
log_text
|
python
|
traveller59/second.pytorch
|
second/utils/log_tool.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/log_tool.py
|
MIT
|
def points_to_bev(points,
voxel_size,
coors_range,
with_reflectivity=False,
density_norm_num=16,
max_voxels=40000):
"""convert kitti points(N, 4) to a bev map. return [C, H, W] map.
this function based on algorithm in points_to_voxel.
takes 5ms in a reduced pointcloud with voxel_size=[0.1, 0.1, 0.8]
Args:
points: [N, ndim] float tensor. points[:, :3] contain xyz points and
points[:, 3] contain reflectivity.
voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size
coors_range: [6] list/tuple or array, float. indicate voxel range.
format: xyzxyz, minmax
with_reflectivity: bool. if True, will add a intensity map to bev map.
Returns:
bev_map: [num_height_maps + 1(2), H, W] float tensor.
`WARNING`: bev_map[-1] is num_points map, NOT density map,
because calculate density map need more time in cpu rather than gpu.
if with_reflectivity is True, bev_map[-2] is intensity map.
"""
if not isinstance(voxel_size, np.ndarray):
voxel_size = np.array(voxel_size, dtype=points.dtype)
if not isinstance(coors_range, np.ndarray):
coors_range = np.array(coors_range, dtype=points.dtype)
voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size
voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())
voxelmap_shape = voxelmap_shape[::-1] # DHW format
coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)
# coors_2d = np.zeros(shape=(max_voxels, 2), dtype=np.int32)
bev_map_shape = list(voxelmap_shape)
bev_map_shape[0] += 1
height_lowers = np.linspace(
coors_range[2], coors_range[5], voxelmap_shape[0], endpoint=False)
if with_reflectivity:
bev_map_shape[0] += 1
bev_map = np.zeros(shape=bev_map_shape, dtype=points.dtype)
_points_to_bevmap_reverse_kernel(points, voxel_size, coors_range,
coor_to_voxelidx, bev_map, height_lowers,
with_reflectivity, max_voxels)
# print(voxel_num)
return bev_map
|
Convert kitti points (N, 4) to a bev map. Returns a [C, H, W] map.
This function is based on the algorithm in points_to_voxel.
Takes 5ms on a reduced pointcloud with voxel_size=[0.1, 0.1, 0.8].
Args:
    points: [N, ndim] float tensor. points[:, :3] contain xyz points and
        points[:, 3] contain reflectivity.
    voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size
    coors_range: [6] list/tuple or array, float. indicate voxel range.
        format: xyzxyz, minmax
    with_reflectivity: bool. if True, an intensity map is added to the bev map.
Returns:
    bev_map: [num_height_maps + 1(2), H, W] float tensor.
    `WARNING`: bev_map[-1] is the num_points map, NOT a density map,
    because calculating a density map takes more time on CPU than on GPU.
    if with_reflectivity is True, bev_map[-2] is the intensity map.
|
points_to_bev
|
python
|
traveller59/second.pytorch
|
second/utils/simplevis.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/utils/simplevis.py
|
MIT
|
def scatter_nd(indices, updates, shape):
"""pytorch edition of tensorflow scatter_nd.
    This function doesn't contain exception-handling code, so use it carefully:
    when indices repeat, it doesn't support the repeated add that is supported
in tensorflow.
"""
ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
ndim = indices.shape[-1]
output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
flatted_indices = indices.view(-1, ndim)
slices = [flatted_indices[:, i] for i in range(ndim)]
slices += [Ellipsis]
ret[slices] = updates.view(*output_shape)
return ret
|
pytorch edition of tensorflow scatter_nd.
This function doesn't contain exception-handling code, so use it carefully:
when indices repeat, it doesn't support the repeated add that is supported
in tensorflow.
|
scatter_nd
|
python
|
traveller59/second.pytorch
|
torchplus/ops/array_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/ops/array_ops.py
|
MIT
|
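A small worked example for scatter_nd above: two rows are written into a 4x3 zero tensor (assumes the function is in scope):
import torch

indices = torch.tensor([[0], [2]])        # last dim indexes the first axis of `shape`
updates = torch.tensor([[1., 2., 3.],
                        [4., 5., 6.]])
out = scatter_nd(indices, updates, [4, 3])
print(out)
# rows 0 and 2 hold the updates, every other entry stays zero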
def latest_checkpoint(model_dir, model_name):
"""return path of latest checkpoint in a model_dir
Args:
model_dir: string, indicate your model dir(save ckpts, summarys,
logs, etc).
model_name: name of your model. we find ckpts by name
Returns:
path: None if isn't exist or latest checkpoint path.
"""
ckpt_info_path = Path(model_dir) / "checkpoints.json"
if not ckpt_info_path.is_file():
return None
with open(ckpt_info_path, 'r') as f:
ckpt_dict = json.loads(f.read())
if model_name not in ckpt_dict['latest_ckpt']:
return None
latest_ckpt = ckpt_dict['latest_ckpt'][model_name]
ckpt_file_name = Path(model_dir) / latest_ckpt
if not ckpt_file_name.is_file():
return None
return str(ckpt_file_name)
|
Return the path of the latest checkpoint in a model_dir.
Args:
    model_dir: string, your model dir (where ckpts, summaries,
        logs, etc. are saved).
    model_name: name of your model. ckpts are found by name.
Returns:
    path: the latest checkpoint path, or None if it doesn't exist.
|
latest_checkpoint
|
python
|
traveller59/second.pytorch
|
torchplus/train/checkpoint.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/checkpoint.py
|
MIT
|
def save(model_dir,
model,
model_name,
global_step,
max_to_keep=8,
keep_latest=True):
"""save a model into model_dir.
Args:
model_dir: string, indicate your model dir(save ckpts, summarys,
logs, etc).
model: torch.nn.Module instance.
model_name: name of your model. we find ckpts by name
global_step: int, indicate current global step.
max_to_keep: int, maximum checkpoints to keep.
keep_latest: bool, if True and there are too much ckpts,
will delete oldest ckpt. else will delete ckpt which has
smallest global step.
Returns:
path: None if isn't exist or latest checkpoint path.
"""
    # prevent saving an incomplete checkpoint on keyboard interrupt
with DelayedKeyboardInterrupt():
ckpt_info_path = Path(model_dir) / "checkpoints.json"
ckpt_filename = "{}-{}.tckpt".format(model_name, global_step)
ckpt_path = Path(model_dir) / ckpt_filename
if not ckpt_info_path.is_file():
ckpt_info_dict = {'latest_ckpt': {}, 'all_ckpts': {}}
else:
with open(ckpt_info_path, 'r') as f:
ckpt_info_dict = json.loads(f.read())
ckpt_info_dict['latest_ckpt'][model_name] = ckpt_filename
if model_name in ckpt_info_dict['all_ckpts']:
ckpt_info_dict['all_ckpts'][model_name].append(ckpt_filename)
else:
ckpt_info_dict['all_ckpts'][model_name] = [ckpt_filename]
all_ckpts = ckpt_info_dict['all_ckpts'][model_name]
torch.save(model.state_dict(), ckpt_path)
        # check that each ckpt in all_ckpts exists; if not, drop it from all_ckpts
all_ckpts_checked = []
for ckpt in all_ckpts:
ckpt_path_uncheck = Path(model_dir) / ckpt
if ckpt_path_uncheck.is_file():
all_ckpts_checked.append(str(ckpt_path_uncheck))
all_ckpts = all_ckpts_checked
if len(all_ckpts) > max_to_keep:
if keep_latest:
ckpt_to_delete = all_ckpts.pop(0)
else:
# delete smallest step
get_step = lambda name: int(name.split('.')[0].split('-')[1])
min_step = min([get_step(name) for name in all_ckpts])
ckpt_to_delete = "{}-{}.tckpt".format(model_name, min_step)
all_ckpts.remove(ckpt_to_delete)
os.remove(str(Path(model_dir) / ckpt_to_delete))
all_ckpts_filename = _ordered_unique([Path(f).name for f in all_ckpts])
ckpt_info_dict['all_ckpts'][model_name] = all_ckpts_filename
with open(ckpt_info_path, 'w') as f:
f.write(json.dumps(ckpt_info_dict, indent=2))
|
Save a model into model_dir.
Args:
    model_dir: string, your model dir (where ckpts, summaries,
        logs, etc. are saved).
    model: torch.nn.Module instance.
    model_name: name of your model. ckpts are found by name.
    global_step: int, current global step.
    max_to_keep: int, maximum checkpoints to keep.
    keep_latest: bool, if True and there are too many ckpts,
        the oldest ckpt is deleted; otherwise the ckpt with the
        smallest global step is deleted.
|
save
|
python
|
traveller59/second.pytorch
|
torchplus/train/checkpoint.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/checkpoint.py
|
MIT
|
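A hypothetical round trip using save() together with latest_checkpoint() from the same file; the directory and model are placeholders:
import os
import torch

model_dir = "/tmp/ckpt_demo"              # placeholder path
os.makedirs(model_dir, exist_ok=True)
net = torch.nn.Linear(4, 2)

save(model_dir, net, model_name="net", global_step=100)
ckpt_path = latest_checkpoint(model_dir, "net")   # e.g. ".../net-100.tckpt" or None
if ckpt_path is not None:
    net.load_state_dict(torch.load(ckpt_path))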
def split_bn_bias(layer_groups):
"Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
split_groups = []
for l in layer_groups:
l1, l2 = [], []
for c in l.children():
if isinstance(c, bn_types): l2.append(c)
else: l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
return split_groups
|
Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups.
|
split_bn_bias
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def get_master(layer_groups, flat_master: bool = False):
"Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
split_groups = split_bn_bias(layer_groups)
model_params = [[
param for param in lg.parameters() if param.requires_grad
] for lg in split_groups]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) != 0:
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params.append([])
return model_params, master_params
else:
master_params = [[param.clone().float().detach() for param in lg]
for lg in model_params]
for mp in master_params:
for param in mp:
param.requires_grad = True
return model_params, master_params
|
Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32.
|
get_master
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
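A minimal sketch of get_master above on a single layer group containing a conv and a batchnorm; split_bn_bias separates the two, and the FP32 master copies mirror the model parameters (assumes both helpers are importable from fastai_optim):
import torch.nn as nn

group = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
model_params, master_params = get_master([group], flat_master=False)
# model_params[0] -> conv weight/bias, model_params[1] -> batchnorm weight/bias
# master_params mirrors them as detached float32 copies with requires_grad=True,
# ready to receive gradients via model_g2master_g() during FP16 training.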
def model_g2master_g(model_params, master_params,
flat_master: bool = False) -> None:
"Copy the `model_params` gradients to `master_params` for the optimizer step."
if flat_master:
for model_group, master_group in zip(model_params, master_params):
if len(master_group) != 0:
master_group[0].grad.data.copy_(
parameters_to_vector(
[p.grad.data.float() for p in model_group]))
else:
for model_group, master_group in zip(model_params, master_params):
for model, master in zip(model_group, master_group):
if model.grad is not None:
if master.grad is None:
master.grad = master.data.new(*master.data.size())
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
|
Copy the `model_params` gradients to `master_params` for the optimizer step.
|
model_g2master_g
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def listify(p=None, q=None):
"Make `p` listy and the same length as `q`."
if p is None: p = []
elif isinstance(p, str): p = [p]
elif not isinstance(p, Iterable): p = [p]
n = q if type(q) == int else len(p) if q is None else len(q)
if len(p) == 1: p = p * n
assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
return list(p)
|
Make `p` listy and the same length as `q`.
|
listify
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
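A few quick calls showing how listify broadcasts hyper-parameters across layer groups (values are illustrative):
print(listify(1e-3, 3))             # [0.001, 0.001, 0.001] -- scalar broadcast to 3 groups
print(listify([1e-4, 1e-3], 2))     # [0.0001, 0.001]       -- length already matches
print(listify("weight_decay"))      # ['weight_decay']      -- strings are wrapped, not iterated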
def trainable_params(m: nn.Module):
"Return list of trainable params in `m`."
res = filter(lambda p: p.requires_grad, m.parameters())
return res
|
Return list of trainable params in `m`.
|
trainable_params
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def create(cls, opt_func, lr, layer_groups, **kwargs):
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{
'params': trainable_params(l),
'lr': 0
} for l in split_groups])
opt = cls(opt, **kwargs)
opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
return opt
|
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
|
create
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def new(self, layer_groups):
"Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
opt_func = getattr(self, 'opt_func', self.opt.__class__)
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{
'params': trainable_params(l),
'lr': 0
} for l in split_groups])
return self.create(
opt_func,
self.lr,
layer_groups,
wd=self.wd,
true_wd=self.true_wd,
bn_wd=self.bn_wd)
|
Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters.
|
new
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def step(self) -> None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr, wd, pg1, pg2 in zip(self._lr, self._wd,
self.opt.param_groups[::2],
self.opt.param_groups[1::2]):
for p in pg1['params']:
p.data.mul_(1 - wd * lr)
if self.bn_wd:
for p in pg2['params']:
p.data.mul_(1 - wd * lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
|
Set weight decay and step optimizer.
|
step
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
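The weight-decay handling in step() above is the decoupled (AdamW-style) form: the parameter is shrunk directly and the optimizer's own weight_decay is zeroed so the gradient step never sees it. A one-number illustration:
lr, wd = 1e-3, 0.01
p = 1.0                      # a single scalar standing in for p.data
p *= (1 - wd * lr)           # mirrors p.data.mul_(1 - wd * lr)
print(p)                     # 0.99999; self.opt.step() then applies the plain update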
def clear(self):
"Reset the state of the inner optimizer."
sd = self.state_dict()
sd['state'] = {}
self.load_state_dict(sd)
|
Reset the state of the inner optimizer.
|
clear
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def beta(self, val: float) -> None:
"Set beta (or alpha as makes sense for given optimizer)."
if val is None: return
if 'betas' in self.opt_keys:
self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys:
self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
|
Set beta (or alpha as makes sense for given optimizer).
|
beta
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def read_defaults(self) -> None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys:
self._mom, self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys:
self._wd = self.read_val('weight_decay')
|
Read the values inside the optimizer for the hyper-parameters.
|
read_defaults
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def set_val(self, key: str, val, bn_groups: bool = True):
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
for v, pg1, pg2 in zip(val, self.opt.param_groups[::2],
self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
|
Set `val` inside the optimizer dictionary at `key`.
|
set_val
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def read_val(self, key: str):
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
|
Read a hyperparameter `key` in the optimizer dictionary.
|
read_val
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def create(cls,
opt_func,
lr,
layer_groups,
model,
flat_master=False,
loss_scale=512.0,
**kwargs):
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
opt.model_params, opt.master_params = get_master(
layer_groups, flat_master)
opt.flat_master = flat_master
opt.loss_scale = loss_scale
opt.model = model
#Changes the optimizer so that the optimization step is done in FP32.
# opt = self.learn.opt
mom, wd, beta = opt.mom, opt.wd, opt.beta
lrs = [lr for lr in opt._lr for _ in range(2)]
opt_params = [{
'params': mp,
'lr': lr
} for mp, lr in zip(opt.master_params, lrs)]
opt.opt = opt_func(opt_params)
opt.mom, opt.wd, opt.beta = mom, wd, beta
return opt
|
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
|
create
|
python
|
traveller59/second.pytorch
|
torchplus/train/fastai_optim.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/fastai_optim.py
|
MIT
|
def annealing_cos(start, end, pct):
# print(pct, start, end)
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start - end) / 2 * cos_out
|
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
annealing_cos
|
python
|
traveller59/second.pytorch
|
torchplus/train/learning_schedules_fastai.py
|
https://github.com/traveller59/second.pytorch/blob/master/torchplus/train/learning_schedules_fastai.py
|
MIT
|
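annealing_cos above traces half a cosine from start to end as pct runs from 0 to 1; a few sample values (assumes the function is in scope):
for pct in (0.0, 0.25, 0.5, 1.0):
    print(round(annealing_cos(3e-3, 3e-5, pct), 6))
# 0.003, 0.002565, 0.001515, 3e-05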
def install_with_constraints(session, *args, **kwargs):
"""Install packages constrained by Poetry's lock file.
This function is a wrapper for nox.sessions.Session.install. It
invokes pip to install packages inside of the session's virtualenv.
Additionally, pip is passed a constraints file generated from
Poetry's lock file, to ensure that the packages are pinned to the
versions specified in poetry.lock. This allows you to manage the
packages as Poetry development dependencies.
Args:
----
session: The Session object.
*args: Command-line arguments for pip.
**kwargs: Additional keyword arguments for Session.install.
"""
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--without-hashes",
"--with=dev",
"--format=constraints.txt",
f"--output={requirements.name}",
external=True,
)
session.install(f"--constraint={requirements.name}", *args, **kwargs)
|
Install packages constrained by Poetry's lock file.
This function is a wrapper for nox.sessions.Session.install. It
invokes pip to install packages inside of the session's virtualenv.
Additionally, pip is passed a constraints file generated from
Poetry's lock file, to ensure that the packages are pinned to the
versions specified in poetry.lock. This allows you to manage the
packages as Poetry development dependencies.
Args:
----
session: The Session object.
*args: Command-line arguments for pip.
**kwargs: Additional keyword arguments for Session.install.
|
install_with_constraints
|
python
|
JakobGM/patito
|
noxfile.py
|
https://github.com/JakobGM/patito/blob/master/noxfile.py
|
MIT
|
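A hypothetical nox session built on install_with_constraints above; the session name, Python versions and package are placeholders:
import nox

@nox.session(python=["3.9", "3.10"])
def lint(session):
    """Lint the code base with ruff."""
    args = session.posargs or ["src", "tests"]
    install_with_constraints(session, "ruff")
    session.run("ruff", "check", *args)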
def test(session):
"""Run test suite using pytest + coverage + xdoctest."""
if session.python == "3.9":
        # Only run test coverage and docstring tests on python 3.9
args = session.posargs # or ["--cov", "--xdoctest"]
else:
args = session.posargs
session.run(
"poetry",
"install",
"--only=main",
"--extras",
"caching pandas",
external=True,
)
install_with_constraints(
session,
# "coverage[toml]",
"pytest",
# "pytest-cov",
"xdoctest",
)
session.run("pytest", *args)
|
Run test suite using pytest + coverage + xdoctest.
|
test
|
python
|
JakobGM/patito
|
noxfile.py
|
https://github.com/JakobGM/patito/blob/master/noxfile.py
|
MIT
|
def type_check(session):
"""Run type-checking on project using pyright."""
args = session.posargs or locations
session.run(
"poetry",
"install",
"--only=main",
"--extras",
"caching pandas",
external=True,
)
install_with_constraints(
session, "mypy", "pyright", "pytest", "types-setuptools", "pandas-stubs"
)
session.run("pyright", *args)
session.run("mypy", *args)
|
Run type-checking on project using pyright.
|
type_check
|
python
|
JakobGM/patito
|
noxfile.py
|
https://github.com/JakobGM/patito/blob/master/noxfile.py
|
MIT
|
def format(session):
"""Run the ruff formatter on the entire code base."""
args = session.posargs or locations
install_with_constraints(session, "ruff")
session.run("ruff format", *args)
|
Run the ruff formatter on the entire code base.
|
format
|
python
|
JakobGM/patito
|
noxfile.py
|
https://github.com/JakobGM/patito/blob/master/noxfile.py
|
MIT
|
def __init__(self, exc: Exception, loc: Union[str, "Loc"]) -> None:
"""Wrap an error in an ErrorWrapper."""
self.exc = exc
self._loc = loc
|
Wrap an error in an ErrorWrapper.
|
__init__
|
python
|
JakobGM/patito
|
src/patito/exceptions.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/exceptions.py
|
MIT
|
def collect(
self,
*args,
**kwargs,
) -> DataFrame[ModelType]: # noqa: DAR101, DAR201
"""Collect into a DataFrame.
See documentation of polars.DataFrame.collect for full description of
parameters.
"""
background = kwargs.pop("background", False)
df: pl.DataFrame = super().collect(*args, background=background, **kwargs)
df = DataFrame(df)
if getattr(self, "model", False):
df = df.set_model(self.model)
return df
|
Collect into a DataFrame.
See documentation of polars.DataFrame.collect for full description of
parameters.
|
collect
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def from_existing(cls: type[LDF], lf: pl.LazyFrame) -> LDF:
"""Construct a patito.DataFrame object from an existing polars.DataFrame object."""
if getattr(cls, "model", False):
return cls.model.LazyFrame._from_pyldf(super().lazy()._ldf) # type: ignore
return LazyFrame._from_pyldf(lf._ldf) # type: ignore
|
Construct a patito.DataFrame object from an existing polars.DataFrame object.
|
from_existing
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def lazy(self: DataFrame[ModelType]) -> LazyFrame[ModelType]:
"""Convert DataFrame into LazyFrame.
See documentation of polars.DataFrame.lazy() for full description.
Returns:
A new LazyFrame object.
"""
if getattr(self, "model", False):
return self.model.LazyFrame._from_pyldf(super().lazy()._ldf) # type: ignore
return LazyFrame._from_pyldf(super().lazy()._ldf) # type: ignore
|
Convert DataFrame into LazyFrame.
See documentation of polars.DataFrame.lazy() for full description.
Returns:
A new LazyFrame object.
|
lazy
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def validate(self, columns: Sequence[str] | None = None, **kwargs: Any):
"""Validate the schema and content of the dataframe.
You must invoke ``.set_model()`` before invoking ``.validate()`` in order
to specify how the dataframe should be validated.
Returns:
DataFrame[Model]: The original patito dataframe, if correctly validated.
Raises:
patito.exceptions.DataFrameValidationError: If the dataframe does not match the
specified schema.
TypeError: If ``DataFrame.set_model()`` has not been invoked prior to
validation. Note that ``patito.Model.DataFrame`` automatically invokes
``DataFrame.set_model()`` for you.
Examples:
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... temperature_zone: Literal["dry", "cold", "frozen"]
... is_for_sale: bool
...
>>> df = pt.DataFrame(
... {
... "product_id": [1, 1, 3],
... "temperature_zone": ["dry", "dry", "oven"],
... }
... ).set_model(Product)
>>> try:
... df.validate()
... except pt.DataFrameValidationError as exc:
... print(exc)
...
3 validation errors for Product
is_for_sale
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
temperature_zone
Rows with invalid values: {'oven'}. (type=value_error.rowvalue)
"""
if not hasattr(self, "model"):
raise TypeError(
f"You must invoke {self.__class__.__name__}.set_model() "
f"before invoking {self.__class__.__name__}.validate()."
)
self.model.validate(dataframe=self, columns=columns, **kwargs)
return self
|
Validate the schema and content of the dataframe.
You must invoke ``.set_model()`` before invoking ``.validate()`` in order
to specify how the dataframe should be validated.
Returns:
DataFrame[Model]: The original patito dataframe, if correctly validated.
Raises:
patito.exceptions.DataFrameValidationError: If the dataframe does not match the
specified schema.
TypeError: If ``DataFrame.set_model()`` has not been invoked prior to
validation. Note that ``patito.Model.DataFrame`` automatically invokes
``DataFrame.set_model()`` for you.
Examples:
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... temperature_zone: Literal["dry", "cold", "frozen"]
... is_for_sale: bool
...
>>> df = pt.DataFrame(
... {
... "product_id": [1, 1, 3],
... "temperature_zone": ["dry", "dry", "oven"],
... }
... ).set_model(Product)
>>> try:
... df.validate()
... except pt.DataFrameValidationError as exc:
... print(exc)
...
3 validation errors for Product
is_for_sale
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
temperature_zone
Rows with invalid values: {'oven'}. (type=value_error.rowvalue)
|
validate
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def get(self, predicate: pl.Expr | None = None) -> ModelType:
"""Fetch the single row that matches the given polars predicate.
If you expect a data frame to already consist of one single row,
you can use ``.get()`` without any arguments to return that row.
Raises:
RowDoesNotExist: If zero rows evaluate to true for the given predicate.
MultipleRowsReturned: If more than one row evaluates to true for the given
predicate.
RuntimeError: The superclass of both ``RowDoesNotExist`` and
``MultipleRowsReturned`` if you want to catch both exceptions with the
same class.
Args:
predicate: A polars expression defining the criteria of the filter.
Returns:
Model: A pydantic-derived base model representing the given row.
Example:
>>> import patito as pt
>>> import polars as pl
>>> df = pt.DataFrame({"product_id": [1, 2, 3], "price": [10, 10, 20]})
The ``.get()`` will by default return a dynamically constructed pydantic
model if no model has been associated with the given dataframe:
>>> df.get(pl.col("product_id") == 1)
UntypedRow(product_id=1, price=10)
If a Patito model has been associated with the dataframe, by the use of
:ref:`DataFrame.set_model()<DataFrame.set_model>`, then the given model will
be used to represent the return type:
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... price: float
...
>>> df.set_model(Product).get(pl.col("product_id") == 1)
Product(product_id=1, price=10.0)
You can invoke ``.get()`` without any arguments on dataframes containing
exactly one row:
>>> df.filter(pl.col("product_id") == 1).get()
UntypedRow(product_id=1, price=10)
If the given predicate matches multiple rows a ``MultipleRowsReturned`` will
be raised:
>>> try:
... df.get(pl.col("price") == 10)
... except pt.exceptions.MultipleRowsReturned as e:
... print(e)
...
DataFrame.get() yielded 2 rows.
If the given predicate matches zero rows a ``RowDoesNotExist`` will
be raised:
>>> try:
... df.get(pl.col("price") == 0)
... except pt.exceptions.RowDoesNotExist as e:
... print(e)
...
DataFrame.get() yielded 0 rows.
"""
row = self if predicate is None else self.filter(predicate)
if row.height == 0:
raise RowDoesNotExist(f"{self.__class__.__name__}.get() yielded 0 rows.")
if row.height > 1:
raise MultipleRowsReturned(
f"{self.__class__.__name__}.get() yielded {row.height} rows."
)
if hasattr(self, "model"):
return self.model.from_row(row)
else:
return self._pydantic_model().from_row(row) # type: ignore
|
Fetch the single row that matches the given polars predicate.
If you expect a data frame to already consist of one single row,
you can use ``.get()`` without any arguments to return that row.
Raises:
RowDoesNotExist: If zero rows evaluate to true for the given predicate.
MultipleRowsReturned: If more than one row evaluates to true for the given
predicate.
RuntimeError: The superclass of both ``RowDoesNotExist`` and
``MultipleRowsReturned`` if you want to catch both exceptions with the
same class.
Args:
predicate: A polars expression defining the criteria of the filter.
Returns:
Model: A pydantic-derived base model representing the given row.
Example:
>>> import patito as pt
>>> import polars as pl
>>> df = pt.DataFrame({"product_id": [1, 2, 3], "price": [10, 10, 20]})
The ``.get()`` will by default return a dynamically constructed pydantic
model if no model has been associated with the given dataframe:
>>> df.get(pl.col("product_id") == 1)
UntypedRow(product_id=1, price=10)
If a Patito model has been associated with the dataframe, by the use of
:ref:`DataFrame.set_model()<DataFrame.set_model>`, then the given model will
be used to represent the return type:
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... price: float
...
>>> df.set_model(Product).get(pl.col("product_id") == 1)
Product(product_id=1, price=10.0)
You can invoke ``.get()`` without any arguments on dataframes containing
exactly one row:
>>> df.filter(pl.col("product_id") == 1).get()
UntypedRow(product_id=1, price=10)
If the given predicate matches multiple rows a ``MultipleRowsReturned`` will
be raised:
>>> try:
... df.get(pl.col("price") == 10)
... except pt.exceptions.MultipleRowsReturned as e:
... print(e)
...
DataFrame.get() yielded 2 rows.
If the given predicate matches zero rows a ``RowDoesNotExist`` will
be raised:
>>> try:
... df.get(pl.col("price") == 0)
... except pt.exceptions.RowDoesNotExist as e:
... print(e)
...
DataFrame.get() yielded 0 rows.
|
get
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def iter_models(
self, validate_df: bool = True, validate_model: bool = False
) -> ModelGenerator[ModelType]:
"""Iterate over all rows in the dataframe as pydantic models.
Args:
validate_df: If set to ``True``, the dataframe will be validated before
making models out of each row. If set to ``False``, beware that columns
need to be the exact same as the model fields.
validate_model: If set to ``True``, each model will be validated when
constructing. Disabled by default since df validation should cover this case.
Yields:
Model: A pydantic-derived model representing the given row. .to_list() can be
used to convert the iterator to a list.
Raises:
TypeError: If ``DataFrame.set_model()`` has not been invoked prior to
iteration.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... price: float
>>> df = pt.DataFrame({"product_id": [1, 2], "price": [10., 20.]})
>>> df = df.set_model(Product)
>>> for product in df.iter_models():
... print(product)
...
Product(product_id=1, price=10.0)
Product(product_id=2, price=20.0)
"""
if not hasattr(self, "model"):
raise TypeError(
f"You must invoke {self.__class__.__name__}.set_model() "
f"before invoking {self.__class__.__name__}.iter_models()."
)
df = self.validate(drop_superfluous_columns=True) if validate_df else self
def _iter_models_with_validate(
_df: DataFrame[ModelType],
) -> Iterator[ModelType]:
for row in _df.iter_rows(named=True):
yield self.model(**row)
def _iter_models_without_validate(
_df: DataFrame[ModelType],
) -> Iterator[ModelType]:
for row in _df.iter_rows(named=True):
yield self.model.model_construct(**row)
_iter_models = (
_iter_models_with_validate
if validate_model
else _iter_models_without_validate
)
return ModelGenerator(_iter_models(df))
|
Iterate over all rows in the dataframe as pydantic models.
Args:
validate_df: If set to ``True``, the dataframe will be validated before
making models out of each row. If set to ``False``, beware that columns
need to be the exact same as the model fields.
validate_model: If set to ``True``, each model will be validated when
constructing. Disabled by default since df validation should cover this case.
Yields:
Model: A pydantic-derived model representing the given row. .to_list() can be
used to convert the iterator to a list.
Raises:
TypeError: If ``DataFrame.set_model()`` has not been invoked prior to
iteration.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... price: float
>>> df = pt.DataFrame({"product_id": [1, 2], "price": [10., 20.]})
>>> df = df.set_model(Product)
>>> for product in df.iter_models():
... print(product)
...
Product(product_id=1, price=10.0)
Product(product_id=2, price=20.0)
|
iter_models
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
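A short hedged sketch of the two validation flags on ``DataFrame.iter_models()`` described above; the Product model and data are hypothetical and only assume the patito API shown in this record.

import patito as pt

class Product(pt.Model):
    product_id: int = pt.Field(unique=True)
    price: float

df = pt.DataFrame({"product_id": [1, 2], "price": [10.0, 20.0]}).set_model(Product)

# Skip the dataframe-level validation, but validate each constructed model instead.
products = df.iter_models(validate_df=False, validate_model=True).to_list()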
def _pydantic_model(self) -> type[Model]:
"""Dynamically construct patito model compliant with dataframe.
Returns:
A pydantic model class where all the rows have been specified as
`typing.Any` fields.
"""
from patito.pydantic import Model
pydantic_annotations = {column: (Any, ...) for column in self.columns}
return cast(
type[Model],
create_model( # type: ignore
"UntypedRow",
__base__=Model,
**pydantic_annotations, # pyright: ignore
),
)
|
Dynamically construct patito model compliant with dataframe.
Returns:
A pydantic model class where all the rows have been specified as
`typing.Any` fields.
|
_pydantic_model
|
python
|
JakobGM/patito
|
src/patito/polars.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/polars.py
|
MIT
|
def __init__(cls, name: str, bases: tuple, clsdict: dict, **kwargs) -> None:
"""Construct new patito model.
Args:
name: Name of model class.
bases: Tuple of superclasses.
clsdict: Dictionary containing class properties.
**kwargs: Additional keyword arguments.
"""
super().__init__(name, bases, clsdict, **kwargs)
NewDataFrame = type(
f"{cls.__name__}DataFrame",
(DataFrame,),
{"model": cls},
)
cls.DataFrame: type[DataFrame[cls]] = NewDataFrame # type: ignore
NewLazyFrame = type(
f"{cls.__name__}LazyFrame",
(LazyFrame,),
{"model": cls},
)
cls.LazyFrame: type[LazyFrame[cls]] = NewLazyFrame # type: ignore
|
Construct new patito model.
Args:
name: Name of model class.
bases: Tuple of superclasses.
clsdict: Dictionary containing class properties.
**kwargs: Additional keyword arguments.
|
__init__
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
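A hedged sketch of what the metaclass wiring above gives you in practice; Product is a hypothetical model.

import patito as pt

class Product(pt.Model):
    product_id: int
    name: str

# The metaclass attached a model-bound DataFrame subclass to the class itself,
# so no explicit .set_model(Product) call is needed before validating.
df = Product.DataFrame({"product_id": [1], "name": ["apple"]})
df.validate()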
def valid_dtypes(
cls: type[Model],
) -> Mapping[str, frozenset[DataTypeClass | DataType]]:
"""Return a list of polars dtypes which Patito considers valid for each field.
The first item of each list is the default dtype chosen by Patito.
Returns:
A dictionary mapping each column string name to a list of valid dtypes.
Raises:
NotImplementedError: If one or more model fields are annotated with types
not compatible with polars.
"""
return valid_dtypes_for_model(cls)
|
Return a list of polars dtypes which Patito considers valid for each field.
The first item of each list is the default dtype chosen by Patito.
Returns:
A dictionary mapping each column string name to a list of valid dtypes.
Raises:
NotImplementedError: If one or more model fields are annotated with types
not compatible with polars.
|
valid_dtypes
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
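A minimal sketch of inspecting the accepted dtypes per column; MyModel is hypothetical and the exact dtype sets depend on the installed polars version.

import patito as pt

class MyModel(pt.Model):
    id: int
    name: str

# Mapping of column name -> set of polars dtypes Patito will accept for that field.
for column, dtypes in MyModel.valid_dtypes.items():
    print(column, sorted(str(dtype) for dtype in dtypes))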
def defaults(cls: type[Model]) -> dict[str, Any]:
"""Return default field values specified on the model.
Returns:
Dictionary containing fields with their respective default values.
Example:
>>> from typing_extensions import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... name: str
... price: int = 0
... temperature_zone: Literal["dry", "cold", "frozen"] = "dry"
...
>>> Product.defaults
{'price': 0, 'temperature_zone': 'dry'}
"""
return {
field_name: props["default"]
for field_name, props in cls._schema_properties().items()
if "default" in props
}
|
Return default field values specified on the model.
Returns:
Dictionary containing fields with their respective default values.
Example:
>>> from typing_extensions import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... name: str
... price: int = 0
... temperature_zone: Literal["dry", "cold", "frozen"] = "dry"
...
>>> Product.defaults
{'price': 0, 'temperature_zone': 'dry'}
|
defaults
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def non_nullable_columns(cls: type[Model]) -> set[str]:
"""Return names of those columns that are non-nullable in the schema.
Returns:
Set of column name strings.
Example:
>>> from typing import Optional
>>> import patito as pt
>>> class MyModel(pt.Model):
... nullable_field: Optional[int]
... another_nullable_field: Optional[int] = None
... non_nullable_field: int
... another_non_nullable_field: str
...
>>> sorted(MyModel.non_nullable_columns)
['another_non_nullable_field', 'non_nullable_field']
"""
return set(
k
for k in cls.columns
if not (
is_optional(cls.model_fields[k].annotation)
or cls.model_fields[k].annotation is type(None)
)
)
|
Return names of those columns that are non-nullable in the schema.
Returns:
Set of column name strings.
Example:
>>> from typing import Optional
>>> import patito as pt
>>> class MyModel(pt.Model):
... nullable_field: Optional[int]
... another_nullable_field: Optional[int] = None
... non_nullable_field: int
... another_non_nullable_field: str
...
>>> sorted(MyModel.non_nullable_columns)
['another_non_nullable_field', 'non_nullable_field']
|
non_nullable_columns
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def unique_columns(cls: type[Model]) -> set[str]:
"""Return columns with uniqueness constraint.
Returns:
Set of column name strings.
Example:
>>> from typing import Optional
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... barcode: Optional[str] = pt.Field(unique=True)
... name: str
...
>>> sorted(Product.unique_columns)
['barcode', 'product_id']
"""
infos = cls.column_infos
return {column for column in cls.columns if infos[column].unique}
|
Return columns with uniqueness constraint.
Returns:
Set of column name strings.
Example:
>>> from typing import Optional
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... barcode: Optional[str] = pt.Field(unique=True)
... name: str
...
>>> sorted(Product.unique_columns)
['barcode', 'product_id']
|
unique_columns
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def derived_columns(cls: type[Model]) -> set[str]:
"""Return set of columns which are derived from other columns."""
infos = cls.column_infos
return {
column for column in cls.columns if infos[column].derived_from is not None
}
|
Return set of columns which are derived from other columns.
|
derived_columns
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
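A hedged sketch of a model with a derived field, using the ``derived_from`` parameter documented under ``Field`` later in this file; DerivedModel is hypothetical.

import patito as pt
import polars as pl

class DerivedModel(pt.Model):
    price: float
    # Marked as derived: the expression is evaluated when pt.DataFrame.derive() runs.
    price_with_vat: float = pt.Field(derived_from=pl.col("price") * 1.25)

print(DerivedModel.derived_columns)  # {'price_with_vat'}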
def validate_schema(cls: type[ModelType]):
"""Users should run this after defining or edit a model. We withhold the checks at model definition time to avoid expensive queries of the model schema."""
for column in cls.columns:
col_info = cls.column_infos[column]
field_info = cls.model_fields[column]
if col_info.dtype:
validate_polars_dtype(
annotation=field_info.annotation, dtype=col_info.dtype
)
else:
validate_annotation(field_info.annotation)
|
Users should run this after defining or editing a model. We withhold the checks at model definition time to avoid expensive queries of the model schema.
|
validate_schema
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
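A minimal usage sketch, assuming a hypothetical Event model: the check is run once, explicitly, after the model is defined.

import patito as pt
import polars as pl

class Event(pt.Model):
    event_id: int = pt.Field(dtype=pl.UInt32)
    name: str

# Raises if a declared dtype is incompatible with the field's type annotation.
Event.validate_schema()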
def from_row(
cls: type[ModelType],
row: pd.DataFrame | pl.DataFrame,
validate: bool = True,
) -> ModelType:
"""Represent a single data frame row as a Patito model.
Args:
        row: A dataframe, either polars or pandas, consisting of a single row.
validate: If ``False``, skip pydantic validation of the given row data.
Returns:
Model: A patito model representing the given row data.
Raises:
        TypeError: If the given type is neither a pandas nor a polars DataFrame.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int
... name: str
... price: float
...
>>> df = pl.DataFrame(
... [["1", "product name", "1.22"]],
... schema=["product_id", "name", "price"],
... orient="row",
... )
>>> Product.from_row(df)
Product(product_id=1, name='product name', price=1.22)
>>> Product.from_row(df, validate=False)
Product(product_id='1', name='product name', price='1.22')
"""
if isinstance(row, pl.DataFrame):
dataframe = row
elif _PANDAS_AVAILABLE and isinstance(row, pd.DataFrame):
dataframe = pl.DataFrame._from_pandas(row)
elif _PANDAS_AVAILABLE and isinstance(row, pd.Series):
return cls(**dict(row.items())) # type: ignore[unreachable]
else:
raise TypeError(f"{cls.__name__}.from_row not implemented for {type(row)}.")
return cls._from_polars(dataframe=dataframe, validate=validate)
|
Represent a single data frame row as a Patito model.
Args:
row: A dataframe, either polars or pandas, consisting of a single row.
validate: If ``False``, skip pydantic validation of the given row data.
Returns:
Model: A patito model representing the given row data.
Raises:
TypeError: If the given type is neither a pandas nor a polars DataFrame.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int
... name: str
... price: float
...
>>> df = pl.DataFrame(
... [["1", "product name", "1.22"]],
... schema=["product_id", "name", "price"],
... orient="row",
... )
>>> Product.from_row(df)
Product(product_id=1, name='product name', price=1.22)
>>> Product.from_row(df, validate=False)
Product(product_id='1', name='product name', price='1.22')
|
from_row
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def _from_polars(
cls: type[ModelType],
dataframe: pl.DataFrame,
validate: bool = True,
) -> ModelType:
"""Construct model from a single polars row.
Args:
dataframe: A polars dataframe consisting of one single row.
validate: If ``True``, run the pydantic validators. If ``False``, pydantic
will not cast any types in the resulting object.
Returns:
Model: A pydantic model object representing the given polars row.
Raises:
TypeError: If the provided ``dataframe`` argument is not of type
``polars.DataFrame``.
ValueError: If the given ``dataframe`` argument does not consist of exactly
one row.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int
... name: str
... price: float
...
>>> df = pl.DataFrame(
... [["1", "product name", "1.22"]],
... schema=["product_id", "name", "price"],
... orient="row",
... )
>>> Product._from_polars(df)
Product(product_id=1, name='product name', price=1.22)
>>> Product._from_polars(df, validate=False)
Product(product_id='1', name='product name', price='1.22')
"""
if not isinstance(dataframe, pl.DataFrame):
raise TypeError(
f"{cls.__name__}._from_polars() must be invoked with polars.DataFrame, "
f"not {type(dataframe)}!"
)
elif len(dataframe) != 1:
raise ValueError(
f"{cls.__name__}._from_polars() can only be invoked with exactly "
f"1 row, while {len(dataframe)} rows were provided."
)
# We have been provided with a single polars.DataFrame row
# Convert to the equivalent keyword invocation of the pydantic model
if validate:
return cls(**dataframe.to_dicts()[0])
else:
return cls.model_construct(**dataframe.to_dicts()[0])
|
Construct model from a single polars row.
Args:
dataframe: A polars dataframe consisting of one single row.
validate: If ``True``, run the pydantic validators. If ``False``, pydantic
will not cast any types in the resulting object.
Returns:
Model: A pydantic model object representing the given polars row.
Raises:
TypeError: If the provided ``dataframe`` argument is not of type
``polars.DataFrame``.
ValueError: If the given ``dataframe`` argument does not consist of exactly
one row.
Example:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int
... name: str
... price: float
...
>>> df = pl.DataFrame(
... [["1", "product name", "1.22"]],
... schema=["product_id", "name", "price"],
... orient="row",
... )
>>> Product._from_polars(df)
Product(product_id=1, name='product name', price=1.22)
>>> Product._from_polars(df, validate=False)
Product(product_id='1', name='product name', price='1.22')
|
_from_polars
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def validate(
cls: type[ModelType],
dataframe: pd.DataFrame | pl.DataFrame,
columns: Sequence[str] | None = None,
allow_missing_columns: bool = False,
allow_superfluous_columns: bool = False,
drop_superfluous_columns: bool = False,
) -> DataFrame[ModelType]:
"""Validate the schema and content of the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
columns: Optional list of columns to validate. If not provided, all columns
of the dataframe will be validated.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
drop_superfluous_columns: If True, columns not present in the model will be
dropped from the resulting dataframe.
Returns:
DataFrame: A patito DataFrame containing the validated data.
Raises:
patito.exceptions.DataFrameValidationError: If the given dataframe does not match
the given schema.
Examples:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... temperature_zone: Literal["dry", "cold", "frozen"]
... is_for_sale: bool
...
>>> df = pl.DataFrame(
... {
... "product_id": [1, 1, 3],
... "temperature_zone": ["dry", "dry", "oven"],
... }
... )
>>> try:
... Product.validate(df)
... except pt.DataFrameValidationError as exc:
... print(exc)
...
3 validation errors for Product
is_for_sale
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
temperature_zone
Rows with invalid values: {'oven'}. (type=value_error.rowvalue)
"""
validate(
dataframe=dataframe,
schema=cls,
columns=columns,
allow_missing_columns=allow_missing_columns,
allow_superfluous_columns=allow_superfluous_columns,
drop_superfluous_columns=drop_superfluous_columns,
)
return cls.DataFrame(dataframe)
|
Validate the schema and content of the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
columns: Optional list of columns to validate. If not provided, all columns
of the dataframe will be validated.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
drop_superfluous_columns: If True, columns not present in the model will be
dropped from the resulting dataframe.
Returns:
DataFrame: A patito DataFrame containing the validated data.
Raises:
patito.exceptions.DataFrameValidationError: If the given dataframe does not match
the given schema.
Examples:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... temperature_zone: Literal["dry", "cold", "frozen"]
... is_for_sale: bool
...
>>> df = pl.DataFrame(
... {
... "product_id": [1, 1, 3],
... "temperature_zone": ["dry", "dry", "oven"],
... }
... )
>>> try:
... Product.validate(df)
... except pt.DataFrameValidationError as exc:
... print(exc)
...
3 validation errors for Product
is_for_sale
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
temperature_zone
Rows with invalid values: {'oven'}. (type=value_error.rowvalue)
|
validate
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
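A hedged sketch of the permissive flags in the signature above, reusing the Product model from the docstring; the flag names come straight from that signature.

import patito as pt
import polars as pl
from typing import Literal

class Product(pt.Model):
    product_id: int = pt.Field(unique=True)
    temperature_zone: Literal["dry", "cold", "frozen"]
    is_for_sale: bool

df = pl.DataFrame({"product_id": [1, 2], "temperature_zone": ["dry", "cold"]})

# Tolerate the missing `is_for_sale` column instead of raising.
validated = Product.validate(df, allow_missing_columns=True)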
def iter_models(
cls: type[ModelType], dataframe: pd.DataFrame | pl.DataFrame
) -> ModelGenerator[ModelType]:
"""Validate the dataframe and iterate over the rows, yielding Patito models.
Args:
dataframe: Polars or pandas DataFrame to be validated.
Returns:
ListableIterator: An iterator of patito models over the validated data.
Raises:
patito.exceptions.DataFrameValidationError: If the given dataframe does not match
the given schema.
"""
return cls.DataFrame(dataframe).iter_models()
|
Validate the dataframe and iterate over the rows, yielding Patito models.
Args:
dataframe: Polars or pandas DataFrame to be validated.
Returns:
ListableIterator: An iterator of patito models over the validated data.
Raises:
patito.exceptions.DataFrameValidationError: If the given dataframe does not match
the given schema.
|
iter_models
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
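A small sketch of the classmethod form: validate a plain polars frame and iterate straight into model instances; Product is hypothetical.

import patito as pt
import polars as pl

class Product(pt.Model):
    product_id: int = pt.Field(unique=True)
    price: float

df = pl.DataFrame({"product_id": [1, 2], "price": [10.0, 20.0]})

# Equivalent to Product.DataFrame(df).iter_models(); raises on invalid data.
products = Product.iter_models(df).to_list()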
def example_value( # noqa: C901
cls,
field: str | None = None,
properties: dict[str, Any] | None = None,
) -> date | datetime | time | timedelta | float | int | str | None | Mapping | list:
"""Return a valid example value for the given model field.
Args:
field: Field name identifier.
properties: Pydantic v2-style properties dict
Returns:
A single value which is consistent with the given field definition.
Raises:
NotImplementedError: If the given field has no example generator.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.example_value("product_id")
-1
>>> Product.example_value("name")
'dummy_string'
>>> Product.example_value("temperature_zone")
'dry'
"""
if field is None and properties is None:
raise ValueError(
"Either 'field' or 'properties' must be provided as argument."
)
if field is not None and properties is not None:
raise ValueError(
"Only one of 'field' or 'properties' can be provided as argument."
)
if field:
properties = cls._schema_properties()[field]
info = cls.column_infos[field]
else:
info = ColumnInfo()
properties = properties or {}
if "type" in properties:
field_type = properties["type"]
elif "anyOf" in properties:
allowable = [x["type"] for x in properties["anyOf"] if "type" in x]
if "null" in allowable:
field_type = "null"
else:
field_type = allowable[0]
else:
raise NotImplementedError(
f"Field type for {properties['title']} not found."
)
if "const" in properties:
# The default value is the only valid value, provided as const
return properties["const"]
elif "default" in properties:
# A default value has been specified in the model field definition
return properties["default"]
elif not properties.get("required", True):
return None
elif field_type == "null":
return None
elif "enum" in properties:
return properties["enum"][0]
elif field_type in {"integer", "number"}:
# For integer and float types we must check if there are imposed bounds
minimum = properties.get("minimum")
exclusive_minimum = properties.get("exclusiveMinimum")
maximum = properties.get("maximum")
exclusive_maximum = properties.get("exclusiveMaximum")
lower = minimum if minimum is not None else exclusive_minimum
upper = maximum if maximum is not None else exclusive_maximum
# If the dtype is an unsigned integer type, we must return a positive value
if info.dtype:
dtype = info.dtype
if dtype in (pl.UInt8, pl.UInt16, pl.UInt32, pl.UInt64):
lower = 0 if lower is None else max(lower, 0)
# First we check the simple case, no upper or lower bound
if lower is None and upper is None:
if field_type == "number":
return -0.5
else:
return -1
# If we have a lower and upper bound, we return something in the middle
elif lower is not None and upper is not None:
if field_type == "number":
return (lower + upper) / 2
else:
return (lower + upper) // 2
# What remains is a single-sided bound, which we will return a value on the
# "right side" of.
number = float if field_type == "number" else int
if lower is not None:
return number(lower + 1)
else:
return number(cast(float, upper) - 1)
elif field_type == "string":
if "pattern" in properties:
raise NotImplementedError(
"Example data generation has not been implemented for regex "
"patterns. You must valid data for such columns explicitly!"
)
elif "format" in properties and properties["format"] == "date":
return date(year=1970, month=1, day=1)
elif "format" in properties and properties["format"] == "date-time":
if "column_info" in properties:
ci = ColumnInfo.model_validate_json(properties["column_info"])
dtype = ci.dtype
if getattr(dtype, "time_zone", None) is not None:
tzinfo = ZoneInfo(dtype.time_zone) # type: ignore
else:
tzinfo = None
return datetime(year=1970, month=1, day=1, tzinfo=tzinfo)
return datetime(year=1970, month=1, day=1)
elif "format" in properties and properties["format"] == "time":
return time(12, 30)
elif "format" in properties and properties["format"] == "duration":
return timedelta(1)
elif "minLength" in properties:
return "a" * properties["minLength"]
elif "maxLength" in properties:
return "a" * min(properties["maxLength"], 1)
else:
return "dummy_string"
elif field_type == "boolean":
return False
elif field_type == "object":
try:
props_o = cls.model_schema["$defs"][properties["title"]]["properties"]
return {f: cls.example_value(properties=props_o[f]) for f in props_o}
except AttributeError as err:
raise NotImplementedError(
"Nested example generation only supported for nested pt.Model classes."
) from err
elif field_type == "array":
return [cls.example_value(properties=properties["items"])]
else: # pragma: no cover
raise NotImplementedError
|
Return a valid example value for the given model field.
Args:
field: Field name identifier.
properties: Pydantic v2-style properties dict
Returns:
A single value which is consistent with the given field definition.
Raises:
NotImplementedError: If the given field has no example generator.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.example_value("product_id")
-1
>>> Product.example_value("name")
'dummy_string'
>>> Product.example_value("temperature_zone")
'dry'
|
example_value
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def example(
cls: type[ModelType],
**kwargs: Any, # noqa: ANN401
) -> ModelType:
"""Produce model instance with filled dummy data for all unspecified fields.
The type annotation of unspecified field is used to fill in type-correct
dummy data, e.g. ``-1`` for ``int``, ``"dummy_string"`` for ``str``, and so
on...
The first item of ``typing.Literal`` annotations are used for dummy values.
Args:
**kwargs: Provide explicit values for any fields which should `not` be
filled with dummy data.
Returns:
Model: A pydantic model object filled with dummy data for all unspecified
model fields.
Raises:
TypeError: If one or more of the provided keyword arguments do not match any
fields on the model.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.example(product_id=1)
Product(product_id=1, name='dummy_string', temperature_zone='dry')
"""
# Non-iterable values besides strings must be repeated
wrong_columns = set(kwargs.keys()) - set(cls.columns)
if wrong_columns:
raise TypeError(f"{cls.__name__} does not contain fields {wrong_columns}!")
new_kwargs = {}
for field_name in cls._schema_properties().keys():
if field_name in kwargs:
# The value has been explicitly specified
new_kwargs[field_name] = kwargs[field_name]
else:
new_kwargs[field_name] = cls.example_value(field=field_name)
return cls(**new_kwargs)
|
Produce model instance with filled dummy data for all unspecified fields.
The type annotation of unspecified field is used to fill in type-correct
dummy data, e.g. ``-1`` for ``int``, ``"dummy_string"`` for ``str``, and so
on...
The first item of ``typing.Literal`` annotations are used for dummy values.
Args:
**kwargs: Provide explicit values for any fields which should `not` be
filled with dummy data.
Returns:
Model: A pydantic model object filled with dummy data for all unspecified
model fields.
Raises:
TypeError: If one or more of the provided keyword arguments do not match any
fields on the model.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.example(product_id=1)
Product(product_id=1, name='dummy_string', temperature_zone='dry')
|
example
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def pandas_examples(
cls: type[ModelType],
data: dict | Iterable,
columns: Iterable[str] | None = None,
) -> pd.DataFrame:
"""Generate dataframe with dummy data for all unspecified columns.
Offers the same API as the pandas.DataFrame constructor.
Non-iterable values, besides strings, are repeated until they become as long as
the iterable arguments.
Args:
data: Data to populate the dummy dataframe with. If
not a dict, column names must also be provided.
columns: Ignored if data is a dict. If
data is an iterable, it will be used as the column names in the
resulting dataframe. Defaults to None.
Returns:
A pandas DataFrame filled with dummy example data.
Raises:
ImportError: If pandas has not been installed. You should install
patito[pandas] in order to integrate patito with pandas.
TypeError: If column names have not been specified in the input data.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.pandas_examples({"name": ["product A", "product B"]})
product_id name temperature_zone
0 -1 product A dry
1 -1 product B dry
"""
if not _PANDAS_AVAILABLE:
# Re-trigger the import error, but this time don't catch it
raise ImportError("No module named 'pandas'")
if not isinstance(data, dict):
if columns is None:
raise TypeError(
f"{cls.__name__}.pandas_examples() must "
"be provided with column names!"
)
kwargs = dict(zip(columns, zip(*data)))
else:
kwargs = data
kwargs = {
key: (
value
if isinstance(value, Iterable) and not isinstance(value, str)
else itertools.cycle([value])
)
for key, value in kwargs.items()
}
dummies = []
for values in zip(*kwargs.values()):
dummies.append(cls.example(**dict(zip(kwargs.keys(), values))))
return pd.DataFrame([dummy.model_dump() for dummy in dummies])
|
Generate dataframe with dummy data for all unspecified columns.
Offers the same API as the pandas.DataFrame constructor.
Non-iterable values, besides strings, are repeated until they become as long as
the iterable arguments.
Args:
data: Data to populate the dummy dataframe with. If
not a dict, column names must also be provided.
columns: Ignored if data is a dict. If
data is an iterable, it will be used as the column names in the
resulting dataframe. Defaults to None.
Returns:
A pandas DataFrame filled with dummy example data.
Raises:
ImportError: If pandas has not been installed. You should install
patito[pandas] in order to integrate patito with pandas.
TypeError: If column names have not been specified in the input data.
Example:
>>> from typing import Literal
>>> import patito as pt
>>> class Product(pt.Model):
... product_id: int = pt.Field(unique=True)
... name: str
... temperature_zone: Literal["dry", "cold", "frozen"]
...
>>> Product.pandas_examples({"name": ["product A", "product B"]})
product_id name temperature_zone
0 -1 product A dry
1 -1 product B dry
|
pandas_examples
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def join(
cls: type[Model],
other: type[Model],
how: Literal["inner", "left", "outer", "asof", "cross", "semi", "anti"],
) -> type[Model]:
"""Dynamically create a new model compatible with an SQL Join operation.
For instance, ``ModelA.join(ModelB, how="left")`` will create a model containing
all the fields of ``ModelA`` and ``ModelB``, but where all fields of ``ModelB``
has been made ``Optional``, i.e. nullable. This is consistent with the LEFT JOIN
SQL operation making all the columns of the right table nullable.
Args:
other: Another patito Model class.
how: The type of SQL Join operation.
Returns:
A new model type compatible with the resulting schema produced by the given
join operation.
Examples:
>>> class A(Model):
... a: int
...
>>> class B(Model):
... b: int
...
>>> InnerJoinedModel = A.join(B, how="inner")
>>> InnerJoinedModel.columns
['a', 'b']
>>> InnerJoinedModel.nullable_columns
set()
>>> LeftJoinedModel = A.join(B, how="left")
>>> LeftJoinedModel.nullable_columns
{'b'}
>>> OuterJoinedModel = A.join(B, how="outer")
>>> sorted(OuterJoinedModel.nullable_columns)
['a', 'b']
>>> A.join(B, how="anti") is A
True
"""
if how in {"semi", "anti"}:
return cls
kwargs: dict[str, Any] = {}
for model, nullable_methods in (
(cls, {"outer"}),
(other, {"left", "outer", "asof"}),
):
for field_name, field in model.model_fields.items():
make_nullable = how in nullable_methods and type(None) not in get_args(
field.annotation
)
kwargs[field_name] = cls._derive_field(
field, make_nullable=make_nullable
)
return create_model(
f"{cls.__name__}{how.capitalize()}Join{other.__name__}",
**kwargs,
__base__=Model,
)
|
Dynamically create a new model compatible with an SQL Join operation.
For instance, ``ModelA.join(ModelB, how="left")`` will create a model containing
all the fields of ``ModelA`` and ``ModelB``, but where all fields of ``ModelB``
have been made ``Optional``, i.e. nullable. This is consistent with the LEFT JOIN
SQL operation making all the columns of the right table nullable.
Args:
other: Another patito Model class.
how: The type of SQL Join operation.
Returns:
A new model type compatible with the resulting schema produced by the given
join operation.
Examples:
>>> class A(Model):
... a: int
...
>>> class B(Model):
... b: int
...
>>> InnerJoinedModel = A.join(B, how="inner")
>>> InnerJoinedModel.columns
['a', 'b']
>>> InnerJoinedModel.nullable_columns
set()
>>> LeftJoinedModel = A.join(B, how="left")
>>> LeftJoinedModel.nullable_columns
{'b'}
>>> OuterJoinedModel = A.join(B, how="outer")
>>> sorted(OuterJoinedModel.nullable_columns)
['a', 'b']
>>> A.join(B, how="anti") is A
True
|
join
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def select(cls: type[ModelType], fields: str | Iterable[str]) -> type[Model]:
"""Create a new model consisting of only a subset of the model fields.
Args:
fields: A single field name as a string or a collection of strings.
Returns:
A new model containing only the fields specified by ``fields``.
Raises:
ValueError: If one or more non-existent fields are selected.
Example:
>>> class MyModel(Model):
... a: int
... b: int
... c: int
...
>>> MyModel.select("a").columns
['a']
>>> sorted(MyModel.select(["b", "c"]).columns)
['b', 'c']
"""
if isinstance(fields, str):
fields = [fields]
fields = set(fields)
non_existent_fields = fields - set(cls.columns)
if non_existent_fields:
raise ValueError(
f"The following selected fields do not exist: {non_existent_fields}"
)
mapping = {field_name: field_name for field_name in fields}
return cls._derive_model(
model_name=f"Selected{cls.__name__}", field_mapping=mapping
)
|
Create a new model consisting of only a subset of the model fields.
Args:
fields: A single field name as a string or a collection of strings.
Returns:
A new model containing only the fields specified by ``fields``.
Raises:
ValueError: If one or more non-existent fields are selected.
Example:
>>> class MyModel(Model):
... a: int
... b: int
... c: int
...
>>> MyModel.select("a").columns
['a']
>>> sorted(MyModel.select(["b", "c"]).columns)
['b', 'c']
|
select
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def drop(cls: type[ModelType], name: str | Iterable[str]) -> type[Model]:
"""Return a new model where one or more fields are excluded.
Args:
name: A single string field name, or a list of such field names,
which will be dropped.
Returns:
New model class where the given fields have been removed.
Examples:
>>> class MyModel(Model):
... a: int
... b: int
... c: int
...
>>> MyModel.columns
['a', 'b', 'c']
>>> MyModel.drop("c").columns
['a', 'b']
>>> MyModel.drop(["b", "c"]).columns
['a']
"""
dropped_columns = {name} if isinstance(name, str) else set(name)
mapping = {
field_name: field_name
for field_name in cls.columns
if field_name not in dropped_columns
}
return cls._derive_model(
model_name=f"Dropped{cls.__name__}",
field_mapping=mapping,
)
|
Return a new model where one or more fields are excluded.
Args:
name: A single string field name, or a list of such field names,
which will be dropped.
Returns:
New model class where the given fields have been removed.
Examples:
>>> class MyModel(Model):
... a: int
... b: int
... c: int
...
>>> MyModel.columns
['a', 'b', 'c']
>>> MyModel.drop("c").columns
['a', 'b']
>>> MyModel.drop(["b", "c"]).columns
['a']
|
drop
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def prefix(cls: type[ModelType], prefix: str) -> type[Model]:
"""Return a new model where all field names have been prefixed.
Args:
prefix: String prefix to add to all field names.
Returns:
New model class with all the same fields only prefixed with the given prefix.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.prefix("x_").columns
['x_a', 'x_b']
"""
mapping = {f"{prefix}{field_name}": field_name for field_name in cls.columns}
return cls._derive_model(
model_name="Prefixed{cls.__name__}",
field_mapping=mapping,
)
|
Return a new model where all field names have been prefixed.
Args:
prefix: String prefix to add to all field names.
Returns:
New model class with all the same fields only prefixed with the given prefix.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.prefix("x_").columns
['x_a', 'x_b']
|
prefix
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def suffix(cls: type[ModelType], suffix: str) -> type[Model]:
"""Return a new model where all field names have been suffixed.
Args:
suffix: String suffix to add to all field names.
Returns:
New model class with all the same fields only suffixed with the given
suffix.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.suffix("_x").columns
['a_x', 'b_x']
"""
mapping = {f"{field_name}{suffix}": field_name for field_name in cls.columns}
return cls._derive_model(
model_name="Suffixed{cls.__name__}",
field_mapping=mapping,
)
|
Return a new model where all field names have been suffixed.
Args:
suffix: String suffix to add to all field names.
Returns:
New model class with all the same fields only suffixed with the given
suffix.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.suffix("_x").columns
['a_x', 'b_x']
|
suffix
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def rename(cls: type[ModelType], mapping: dict[str, str]) -> type[Model]:
"""Return a new model class where the specified fields have been renamed.
Args:
mapping: A dictionary where the keys are the old field names
and the values are the new names.
Returns:
A new model class where the given fields have been renamed.
Raises:
ValueError: If non-existent fields are renamed.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.rename({"a": "A"}).columns
['b', 'A']
"""
non_existent_fields = set(mapping.keys()) - set(cls.columns)
if non_existent_fields:
raise ValueError(
f"The following fields do not exist for renaming: {non_existent_fields}"
)
field_mapping = {
field_name: field_name
for field_name in cls.columns
if field_name not in mapping
}
field_mapping.update({value: key for key, value in mapping.items()})
return cls._derive_model(
model_name=f"Renamed{cls.__name__}",
field_mapping=field_mapping,
)
|
Return a new model class where the specified fields have been renamed.
Args:
mapping: A dictionary where the keys are the old field names
and the values are the new names.
Returns:
A new model class where the given fields have been renamed.
Raises:
ValueError: If non-existent fields are renamed.
Example:
>>> class MyModel(Model):
... a: int
... b: int
...
>>> MyModel.rename({"a": "A"}).columns
['b', 'A']
|
rename
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def with_fields(
cls: type[ModelType],
**field_definitions: Any, # noqa: ANN401
) -> type[Model]:
"""Return a new model class where the given fields have been added.
Args:
**field_definitions: the keywords are of the form:
``field_name=(field_type, field_default)``.
Specify ``...`` if no default value is provided.
For instance, ``column_name=(int, ...)`` will create a new non-optional
integer field named ``"column_name"``.
Returns:
A new model with all the original fields and the additional field
definitions.
Example:
>>> class MyModel(Model):
... a: int
...
>>> class ExpandedModel(MyModel):
... b: int
...
>>> MyModel.with_fields(b=(int, ...)).columns == ExpandedModel.columns
True
"""
fields = {field_name: field_name for field_name in cls.columns}
fields.update(field_definitions)
return cls._derive_model(
model_name=f"Expanded{cls.__name__}",
field_mapping=fields,
)
|
Return a new model class where the given fields have been added.
Args:
**field_definitions: the keywords are of the form:
``field_name=(field_type, field_default)``.
Specify ``...`` if no default value is provided.
For instance, ``column_name=(int, ...)`` will create a new non-optional
integer field named ``"column_name"``.
Returns:
A new model with all the original fields and the additional field
definitions.
Example:
>>> class MyModel(Model):
... a: int
...
>>> class ExpandedModel(MyModel):
... b: int
...
>>> MyModel.with_fields(b=(int, ...)).columns == ExpandedModel.columns
True
|
with_fields
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
def _derive_model(
cls: type[ModelType],
model_name: str,
field_mapping: dict[str, Any],
) -> type[Model]:
"""Derive a new model with new field definitions.
Args:
model_name: Name of new model class.
field_mapping: A mapping where the keys represent field names and the values
represent field definitions. String field definitions are used as
pointers to the original fields by name. Otherwise, specify field
definitions as (field_type, field_default) as accepted by
pydantic.create_model.
Returns:
A new model class derived from the model type of self.
"""
new_fields = {}
for new_field_name, field_definition in field_mapping.items():
if isinstance(field_definition, str):
# A single string, interpreted as the name of a field on the existing
# model.
old_field = cls.model_fields[field_definition]
new_fields[new_field_name] = cls._derive_field(old_field)
else:
# We have been given a (field_type, field_default) tuple defining the
# new field directly.
field_type = field_definition[0]
if field_definition[1] is None and type(None) not in get_args(
field_type
):
field_type = Optional[field_type]
new_fields[new_field_name] = (field_type, field_definition[1])
return create_model( # type: ignore
model_name,
__base__=Model,
**new_fields,
)
|
Derive a new model with new field definitions.
Args:
model_name: Name of new model class.
field_mapping: A mapping where the keys represent field names and the values
represent field definitions. String field definitions are used as
pointers to the original fields by name. Otherwise, specify field
definitions as (field_type, field_default) as accepted by
pydantic.create_model.
Returns:
A new model class derived from the model type of self.
|
_derive_model
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
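A hedged sketch of the field_mapping semantics described above (string values point at existing fields, tuples define new ones); the direct use of the private helper and the MyModel/NewModel names are purely illustrative.

from typing import Optional
import patito as pt

class MyModel(pt.Model):
    a: int
    b: str

NewModel = MyModel._derive_model(
    model_name="NewModel",
    field_mapping={"a": "a", "c": (Optional[int], None)},
)
print(sorted(NewModel.columns))  # ['a', 'c']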
def Field(
*args: Any, **kwargs: Any
) -> Any: # annotate with Any to make the downstream type annotations happy
"""Annotate model field with additional type and validation information.
    This function is built on ``pydantic.Field`` and you can find the list of parameters
    in the `API reference <https://docs.pydantic.dev/latest/api/fields/>`_.
    Patito adds additional parameters which are used when validating dataframes;
    these are documented here along with the main parameters which can be used for
    validation. Pydantic's `usage documentation <https://docs.pydantic.dev/latest/concepts/fields/>`_
    can be read alongside the examples below.
Args:
allow_missing (bool): Column may be missing.
column_info: (Type[ColumnInfo]): ColumnInfo object to pass args to.
        constraints (Union[polars.Expression, List[polars.Expression]]): A single
constraint or list of constraints, expressed as a polars expression objects.
All rows must satisfy the given constraint. You can refer to the given column
with ``pt.field``, which will automatically be replaced with
``polars.col(<field_name>)`` before evaluation.
derived_from (Union[str, polars.Expr]): used to mark fields that are meant to be
derived from other fields. Users can specify a polars expression that will
be called to derive the column value when `pt.DataFrame.derive` is called.
dtype (polars.datatype.DataType): The given dataframe column must have the given
polars dtype, for instance ``polars.UInt64`` or ``pl.Float32``.
unique (bool): All row values must be unique.
gt: All values must be greater than ``gt``.
ge: All values must be greater than or equal to ``ge``.
lt: All values must be less than ``lt``.
        le: All values must be less than or equal to ``le``.
multiple_of: All values must be multiples of the given value.
const (bool): If set to ``True`` `all` values must be equal to the provided
default value, the first argument provided to the ``Field`` constructor.
regex (str): UTF-8 string column must match regex pattern for all row values.
min_length (int): Minimum length of all string values in a UTF-8 column.
max_length (int): Maximum length of all string values in a UTF-8 column.
args (Any): additional arguments to pass to pydantic's field.
kwargs (Any): additional keyword arguments to pass to pydantic's field.
Return:
`FieldInfo <https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.FieldInfo>`_:
Object used to represent additional constraints put upon the given field.
Examples:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... # Do not allow duplicates
... product_id: int = pt.Field(unique=True)
...
... # Price must be stored as unsigned 16-bit integers
... price: int = pt.Field(dtype=pl.UInt16)
...
... # The product name should be from 3 to 128 characters long
... name: str = pt.Field(min_length=3, max_length=128)
...
...
>>> Product.DataFrame(
... {
... "product_id": [1, 1],
... "price": [400, 600],
... }
... ).validate()
Traceback (most recent call last):
patito.exceptions.DataFrameValidationError: 3 validation errors for Product
name
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
price
Polars dtype Int64 does not match model field type. (type=type_error.columndtype)
"""
ci = ColumnInfo(**kwargs)
for field in ci.model_fields_set:
kwargs.pop(field)
if kwargs.pop("modern_kwargs_only", True):
for kwarg in kwargs:
if kwarg not in FIELD_KWARGS.kwonlyargs and kwarg not in FIELD_KWARGS.args:
raise ValueError(
f"unexpected kwarg {kwarg}={kwargs[kwarg]}. Add modern_kwargs_only=False to ignore"
)
ci_json = ci.model_dump_json()
existing_json_schema_extra = kwargs.pop("json_schema_extra", {})
merged_json_schema_extra = {**existing_json_schema_extra, "column_info": ci_json}
return fields.Field(
*args,
json_schema_extra=merged_json_schema_extra,
**kwargs,
)
|
Annotate model field with additional type and validation information.
This function is built on ``pydantic.Field`` and you can find the list of parameters
in the `API reference <https://docs.pydantic.dev/latest/api/fields/>`_.
Patito adds additional parameters which are used when validating dataframes;
these are documented here along with the main parameters which can be used for
validation. Pydantic's `usage documentation <https://docs.pydantic.dev/latest/concepts/fields/>`_
can be read alongside the examples below.
Args:
allow_missing (bool): Column may be missing.
column_info: (Type[ColumnInfo]): ColumnInfo object to pass args to.
constraints (Union[polars.Expression, List[polars.Expression]]): A single
constraint or list of constraints, expressed as a polars expression objects.
All rows must satisfy the given constraint. You can refer to the given column
with ``pt.field``, which will automatically be replaced with
``polars.col(<field_name>)`` before evaluation.
derived_from (Union[str, polars.Expr]): used to mark fields that are meant to be
derived from other fields. Users can specify a polars expression that will
be called to derive the column value when `pt.DataFrame.derive` is called.
dtype (polars.datatype.DataType): The given dataframe column must have the given
polars dtype, for instance ``polars.UInt64`` or ``pl.Float32``.
unique (bool): All row values must be unique.
gt: All values must be greater than ``gt``.
ge: All values must be greater than or equal to ``ge``.
lt: All values must be less than ``lt``.
le: All values must be less than or equal to ``le``.
multiple_of: All values must be multiples of the given value.
const (bool): If set to ``True`` `all` values must be equal to the provided
default value, the first argument provided to the ``Field`` constructor.
regex (str): UTF-8 string column must match regex pattern for all row values.
min_length (int): Minimum length of all string values in a UTF-8 column.
max_length (int): Maximum length of all string values in a UTF-8 column.
args (Any): additional arguments to pass to pydantic's field.
kwargs (Any): additional keyword arguments to pass to pydantic's field.
Return:
`FieldInfo <https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.FieldInfo>`_:
Object used to represent additional constraints put upon the given field.
Examples:
>>> import patito as pt
>>> import polars as pl
>>> class Product(pt.Model):
... # Do not allow duplicates
... product_id: int = pt.Field(unique=True)
...
... # Price must be stored as unsigned 16-bit integers
... price: int = pt.Field(dtype=pl.UInt16)
...
... # The product name should be from 3 to 128 characters long
... name: str = pt.Field(min_length=3, max_length=128)
...
...
>>> Product.DataFrame(
... {
... "product_id": [1, 1],
... "price": [400, 600],
... }
... ).validate()
Traceback (most recent call last):
patito.exceptions.DataFrameValidationError: 3 validation errors for Product
name
Missing column (type=type_error.missingcolumns)
product_id
2 rows with duplicated values. (type=value_error.rowvalue)
price
Polars dtype Int64 does not match model field type. (type=type_error.columndtype)
|
Field
|
python
|
JakobGM/patito
|
src/patito/pydantic.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/pydantic.py
|
MIT
|
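A hedged sketch of the ``constraints`` parameter described above but not covered by the doctest; Order is a hypothetical model, and ``pt.field`` is the placeholder that gets replaced by ``polars.col(<field_name>)`` at validation time.

import patito as pt

class Order(pt.Model):
    order_id: int = pt.Field(unique=True)
    # Every row must satisfy this expression after pt.field is substituted.
    quantity: int = pt.Field(constraints=[pt.field > 0])

# The second row violates the constraint, so this raises DataFrameValidationError.
Order.DataFrame({"order_id": [1, 2], "quantity": [3, -1]}).validate()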
def _transform_df(dataframe: pl.DataFrame, schema: type[Model]) -> pl.DataFrame:
"""Transform any properties of the dataframe according to the model.
Currently only supports using AliasGenerator to transform column names to match a model.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
"""
# Check if an alias generator is present in model_config
if alias_gen := schema.model_config.get("alias_generator"):
if isinstance(alias_gen, AliasGenerator):
alias_func = alias_gen.validation_alias or alias_gen.alias
assert (
alias_func is not None
), "An AliasGenerator must contain a transforming function"
else: # alias_gen is a function
alias_func = alias_gen
new_cols: list[str] = [
alias_func(field_name) for field_name in dataframe.columns
] # type: ignore
dataframe.columns = new_cols
return dataframe
|
Transform any properties of the dataframe according to the model.
Currently only supports using AliasGenerator to transform column names to match a model.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
|
_transform_df
|
python
|
JakobGM/patito
|
src/patito/validators.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/validators.py
|
MIT
|
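A hedged sketch of the alias handling above: the alias function is applied to every dataframe column name before validation. CamelModel is hypothetical; AliasGenerator and to_camel are the pydantic v2 helpers the code checks for, and importing the private helper directly is only for illustration.

import polars as pl
import patito as pt
from pydantic import AliasGenerator, ConfigDict
from pydantic.alias_generators import to_camel
from patito.validators import _transform_df

class CamelModel(pt.Model):
    model_config = ConfigDict(alias_generator=AliasGenerator(validation_alias=to_camel))
    product_id: int
    product_name: str

df = pl.DataFrame({"product_id": [1], "product_name": ["apple"]})
# The validation_alias function is applied to each column name of the input frame.
renamed = _transform_df(df, CamelModel)
print(renamed.columns)  # e.g. ['productId', 'productName']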
def _find_errors( # noqa: C901
dataframe: pl.DataFrame,
schema: type[Model],
columns: Sequence[str] | None = None,
allow_missing_columns: bool = False,
allow_superfluous_columns: bool = False,
) -> list[ErrorWrapper]:
"""Validate the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
        columns: If specified, only validate the given columns. The missing-column
            check then verifies that every specified column is present in the input
            dataframe, and the superfluous-column check flags any column in the
            given list that is not part of the schema.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
Returns:
A list of patito.exception.ErrorWrapper instances. The specific validation
error can be retrieved from the "exc" attribute on each error wrapper instance.
MissingColumnsError: If there are any missing columns.
SuperfluousColumnsError: If there are additional, non-specified columns.
MissingValuesError: If there are nulls in a non-optional column.
ColumnDTypeError: If any column has the wrong dtype.
        NotImplementedError: If validation has not been implemented for the given
type.
"""
errors: list[ErrorWrapper] = []
schema_subset = columns or schema.columns
column_subset = columns or dataframe.columns
if not allow_missing_columns:
# Check if any columns are missing
for missing_column in set(schema_subset) - set(dataframe.columns):
col_info = schema.column_infos.get(missing_column)
if col_info is not None and col_info.allow_missing:
continue
errors.append(
ErrorWrapper(
MissingColumnsError("Missing column"),
loc=missing_column,
)
)
if not allow_superfluous_columns:
# Check if any additional columns are included
for superfluous_column in set(column_subset) - set(schema.columns):
errors.append(
ErrorWrapper(
SuperfluousColumnsError("Superfluous column"),
loc=superfluous_column,
)
)
# Check if any non-optional columns have null values
for column in schema.non_nullable_columns.intersection(column_subset):
num_missing_values = dataframe.get_column(name=column).null_count()
if num_missing_values:
errors.append(
ErrorWrapper(
MissingValuesError(
f"{num_missing_values} missing "
f"{'value' if num_missing_values == 1 else 'values'}"
),
loc=column,
)
)
for column, dtype in schema.dtypes.items():
if column not in column_subset:
continue
if not isinstance(dtype, pl.List):
continue
annotation = schema.model_fields[column].annotation # type: ignore[unreachable]
        # Retrieve the annotation of the list itself,
        # unwrapping any potential Optional[...]
list_type = unwrap_optional(annotation)
# Check if the list items themselves should be considered nullable
item_type = get_args(list_type)[0]
if is_optional(item_type):
continue
num_missing_values = (
dataframe.lazy()
.select(column)
# Remove those rows that do not contain lists at all
.filter(pl.col(column).is_not_null())
# Remove empty lists
.filter(pl.col(column).list.len() > 0)
# Convert lists of N items to N individual rows
.explode(column)
# Calculate how many nulls are present in lists
.filter(pl.col(column).is_null())
.collect()
.height
)
if num_missing_values != 0:
errors.append(
ErrorWrapper(
MissingValuesError(
f"{num_missing_values} missing "
f"{'value' if num_missing_values == 1 else 'values'} "
f"in lists"
),
loc=column,
)
)
# Check if any column has a wrong dtype
valid_dtypes = schema.valid_dtypes
dataframe_datatypes = dict(zip(dataframe.columns, dataframe.dtypes))
for column_name, column_properties in schema._schema_properties().items():
# We rename to _tmp here to avoid overwriting the dataframe during filters below
# TODO! Really we should be passing *Series* around rather than the entire dataframe
dataframe_tmp = dataframe
column_info = schema.column_infos[column_name]
if column_name not in dataframe_tmp.columns or column_name not in column_subset:
continue
polars_type = dataframe_datatypes[column_name]
if polars_type not in [
pl.Struct,
pl.List(pl.Struct),
]: # defer struct validation for recursive call to _find_errors later
if polars_type not in valid_dtypes[column_name]:
errors.append(
ErrorWrapper(
ColumnDTypeError(
f"Polars dtype {polars_type} does not match model field type."
),
loc=column_name,
)
)
# Test for when only specific values are accepted
e = _find_enum_errors(
df=dataframe_tmp,
column_name=column_name,
props=column_properties,
schema=schema,
)
if e is not None:
errors.append(e)
if column_info.unique:
# Coalescing to 0 in the case of dataframe of height 0
num_duplicated = dataframe_tmp[column_name].is_duplicated().sum() or 0
if num_duplicated > 0:
errors.append(
ErrorWrapper(
RowValueError(f"{num_duplicated} rows with duplicated values."),
loc=column_name,
)
)
# Intercept struct columns, and process errors separately
if schema.dtypes[column_name] == pl.Struct:
nested_schema = schema.model_fields[column_name].annotation
assert nested_schema is not None
# Additional unpack required if structs column is optional
if is_optional(nested_schema):
nested_schema = unwrap_optional(nested_schema)
# An optional struct means that we allow the struct entry to be
# null. It is the inner model that is responsible for determining
# whether its fields are optional or not. Since the struct is optional,
# we need to filter out any null rows as the inner model may disallow
# nulls on a particular field
# NB As of Polars 1.1, struct_col.is_null() cannot return True
                # The following code has been added to accommodate this
struct_fields = dataframe_tmp[column_name].struct.fields
col_struct = pl.col(column_name).struct
only_non_null_expr = ~pl.all_horizontal(
[col_struct.field(name).is_null() for name in struct_fields]
)
dataframe_tmp = dataframe_tmp.filter(only_non_null_expr)
if dataframe_tmp.is_empty():
continue
struct_errors = _find_errors(
dataframe=dataframe_tmp.select(column_name).unnest(column_name),
schema=nested_schema,
)
# Format nested errors
for error in struct_errors:
error._loc = f"{column_name}.{error._loc}"
errors.extend(struct_errors)
# No need to do any more checks
continue
# Intercept list of structs columns, and process errors separately
elif schema.dtypes[column_name] == pl.List(pl.Struct):
list_annotation = schema.model_fields[column_name].annotation
assert list_annotation is not None
# Handle Optional[list[pl.Struct]]
if is_optional(list_annotation):
list_annotation = unwrap_optional(list_annotation)
dataframe_tmp = dataframe_tmp.filter(pl.col(column_name).is_not_null())
if dataframe_tmp.is_empty():
continue
# Unpack list schema
nested_schema = list_annotation.__args__[0]
dataframe_tmp = (
dataframe_tmp.select(column_name)
.filter(pl.col(column_name).list.len() > 0)
.explode(column_name)
.unnest(column_name)
)
# Handle list[Optional[pl.Struct]]
if is_optional(nested_schema):
nested_schema = unwrap_optional(nested_schema)
dataframe_tmp = dataframe_tmp.filter(pl.all().is_not_null())
if dataframe_tmp.is_empty():
continue
list_struct_errors = _find_errors(
dataframe=dataframe_tmp,
schema=nested_schema,
)
# Format nested errors
for error in list_struct_errors:
error._loc = f"{column_name}.{error._loc}"
errors.extend(list_struct_errors)
# No need to do any more checks
continue
# Check for bounded value fields
col = pl.col(column_name)
filters = {
"maximum": lambda v, col=col: col <= v,
"exclusiveMaximum": lambda v, col=col: col < v,
"minimum": lambda v, col=col: col >= v,
"exclusiveMinimum": lambda v, col=col: col > v,
"multipleOf": lambda v, col=col: (col == 0) | ((col % v) == 0),
"const": lambda v, col=col: col == v,
"pattern": lambda v, col=col: col.str.contains(v),
"minLength": lambda v, col=col: col.str.len_chars() >= v,
"maxLength": lambda v, col=col: col.str.len_chars() <= v,
}
if "anyOf" in column_properties:
checks = [
check(x[key])
for key, check in filters.items()
for x in column_properties["anyOf"]
if key in x
]
else:
checks = []
checks += [
check(column_properties[key])
for key, check in filters.items()
if key in column_properties
]
if checks:
n_invalid_rows = 0
for check in checks:
lazy_df = dataframe_tmp.lazy()
lazy_df = lazy_df.filter(
~check
                )  # get failing rows (nulls evaluate to null on the boolean check; we only want failures, i.e. False)
invalid_rows = lazy_df.collect()
n_invalid_rows += invalid_rows.height
if n_invalid_rows > 0:
errors.append(
ErrorWrapper(
RowValueError(
f"{n_invalid_rows} row{'' if n_invalid_rows == 1 else 's'} "
"with out of bound values."
),
loc=column_name,
)
)
if column_info.constraints is not None:
custom_constraints = column_info.constraints
if isinstance(custom_constraints, pl.Expr):
custom_constraints = [custom_constraints]
constraints = pl.any_horizontal(
[constraint.not_() for constraint in custom_constraints]
)
if "_" in constraints.meta.root_names():
# An underscore is an alias for the current field
illegal_rows = dataframe_tmp.with_columns(
pl.col(column_name).alias("_")
).filter(constraints)
else:
illegal_rows = dataframe_tmp.filter(constraints)
if illegal_rows.height > 0:
errors.append(
ErrorWrapper(
RowValueError(
f"{illegal_rows.height} "
f"row{'' if illegal_rows.height == 1 else 's'} "
"does not match custom constraints."
),
loc=column_name,
)
)
return errors
|
Validate the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
columns: If specified, only validate the given columns. Missing columns will
check if any specified columns are missing from the inputted dataframe,
and superfluous columns will check if any columns not specified in the
schema are present in the columns list.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
Returns:
A list of patito.exception.ErrorWrapper instances. The specific validation
error can be retrieved from the "exc" attribute on each error wrapper instance.
MissingColumnsError: If there are any missing columns.
SuperfluousColumnsError: If there are additional, non-specified columns.
MissingValuesError: If there are nulls in a non-optional column.
ColumnDTypeError: If any column has the wrong dtype.
    NotImplementedError: If validation has not been implemented for the given
type.
|
_find_errors
|
python
|
JakobGM/patito
|
src/patito/validators.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/validators.py
|
MIT
|
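As an editorial aside, a hedged sketch of how the error list built above might be inspected directly. The Person model and the data are made up; the expected error types follow from the null-value and dtype checks in the record, though exact messages depend on the Patito version.

import polars as pl

import patito as pt
from patito.validators import _find_errors  # internal helper shown in the record above

# Hypothetical schema: "name" is required (non-nullable), "age" must be an integer.
class Person(pt.Model):
    name: str
    age: int

# One null name and a float-typed "age" column should both be flagged.
df = pl.DataFrame({"name": ["Alice", None], "age": [30.0, 40.0]})
errors = _find_errors(dataframe=df, schema=Person)
for error in errors:
    # Each ErrorWrapper exposes the underlying exception on its "exc" attribute.
    print(error._loc, type(error.exc).__name__, error.exc)
# Expected from the checks above: MissingValuesError for "name", ColumnDTypeError for "age".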
def validate(
dataframe: pd.DataFrame | pl.DataFrame,
schema: type[Model],
columns: Sequence[str] | None = None,
allow_missing_columns: bool = False,
allow_superfluous_columns: bool = False,
drop_superfluous_columns: bool = False,
) -> pl.DataFrame:
"""Validate the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
columns: Optional list of columns to validate. If not provided, all columns
of the dataframe will be validated.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
drop_superfluous_columns: If True, drop any columns not specified in the schema before validation.
Raises:
DataFrameValidationError: If the given dataframe does not match the given schema.
"""
if drop_superfluous_columns and columns:
raise ValueError(
"Cannot specify both 'columns' and 'drop_superfluous_columns'."
)
if _PANDAS_AVAILABLE and isinstance(dataframe, pd.DataFrame):
polars_dataframe = pl.from_pandas(dataframe)
else:
polars_dataframe = cast(pl.DataFrame, dataframe).clone()
polars_dataframe = _transform_df(polars_dataframe, schema)
if drop_superfluous_columns:
# NOTE: dropping rather than selecting to get the correct error messages
to_drop = set(dataframe.columns) - set(schema.columns)
polars_dataframe = polars_dataframe.drop(to_drop)
errors = _find_errors(
dataframe=polars_dataframe,
schema=schema,
columns=columns,
allow_missing_columns=allow_missing_columns,
allow_superfluous_columns=allow_superfluous_columns,
)
if errors:
raise DataFrameValidationError(errors=errors, model=schema)
return polars_dataframe
|
Validate the given dataframe.
Args:
dataframe: Polars DataFrame to be validated.
schema: Patito model which specifies how the dataframe should be structured.
columns: Optional list of columns to validate. If not provided, all columns
of the dataframe will be validated.
allow_missing_columns: If True, missing columns will not be considered an error.
allow_superfluous_columns: If True, additional columns will not be considered an error.
drop_superfluous_columns: If True, drop any columns not specified in the schema before validation.
Raises:
DataFrameValidationError: If the given dataframe does not match the given schema.
|
validate
|
python
|
JakobGM/patito
|
src/patito/validators.py
|
https://github.com/JakobGM/patito/blob/master/src/patito/validators.py
|
MIT
|
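To round off the record above, a small usage sketch of the public validate entry point. The Product model and data are illustrative, and the DataFrameValidationError import path is an assumption based on how the function raises it.

import polars as pl

import patito as pt
from patito.exceptions import DataFrameValidationError  # import path assumed
from patito.validators import validate

# Hypothetical schema and data; "note" is not declared on the schema.
class Product(pt.Model):
    product_id: int
    price: float

df = pl.DataFrame({"product_id": [1, 2], "price": [9.99, 19.99], "note": ["a", "b"]})

try:
    validate(df, Product)  # superfluous "note" column -> DataFrameValidationError
except DataFrameValidationError as exc:
    print(exc)

# Either tolerate the extra column or drop it before validation:
validated = validate(df, Product, allow_superfluous_columns=True)
validated = validate(df, Product, drop_superfluous_columns=True)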