# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union

from torch import Tensor
from typing_extensions import Literal

from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.metric import Metric
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinarySensitivity.plot", "MulticlassSensitivity.plot", "MultilabelSensitivity.plot"]


class BinarySensitivity(BinaryStatScores):
r"""Compute `Sensitivity`_ for binary tasks.
.. math:: \text{Sensitivity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only proper defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bs`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value
per sample.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
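
    Example (an illustrative sketch with made-up values; the expected output assumes the default ``threshold=0.5``):
        >>> from torch import tensor
        >>> # hypothetical predictions and targets
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinarySensitivity()
        >>> metric(preds, target)
        tensor(0.6667)
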
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0

    def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _sensitivity_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)

    def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
return self._plot(val, ax)


class MulticlassSensitivity(MulticlassStatScores):
r"""Compute `Sensitivity`_ for multiclass tasks.
.. math:: \text{Sensitivity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only proper defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered for any class, the metric for that class will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcs`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Args:
num_classes: Integer specifing the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
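
    Example (an illustrative sketch with made-up values; assumes the inherited default ``average="macro"``):
        >>> from torch import tensor
        >>> # hypothetical predictions and targets
        >>> target = tensor([2, 1, 0, 1])
        >>> preds = tensor([2, 1, 0, 0])
        >>> metric = MulticlassSensitivity(num_classes=3)
        >>> metric(preds, target)
        tensor(0.8333)
        >>> mcs = MulticlassSensitivity(num_classes=3, average=None)
        >>> mcs(preds, target)
        tensor([1.0000, 0.5000, 1.0000])
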
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"

    def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _sensitivity_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average)

    def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
return self._plot(val, ax)


class MultilabelSensitivity(MultilabelStatScores):
r"""Compute `Sensitivity`_ for multilabel tasks.
.. math:: \text{Sensitivity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only proper defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered for any label, the metric for that label will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mls`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Args:
num_labels: Integer specifing the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average: Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
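
    Example (an illustrative sketch with made-up values; assumes the defaults ``threshold=0.5`` and
    ``average="macro"``):
        >>> from torch import tensor
        >>> # hypothetical per-label probabilities and targets
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelSensitivity(num_labels=3)
        >>> metric(preds, target)
        tensor(0.6667)
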
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"

    def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _sensitivity_reduce(
tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)

    def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
return self._plot(val, ax)


class Sensitivity(_ClassificationTaskWrapper):
r"""Compute `Sensitivity`_.
.. math:: \text{Sensitivity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only proper defined when :math:`\text{TP} + \text{FP} \neq 0`. If this case is
encountered for any class/label, the metric for that class/label will be set to 0 and the overall metric may
therefore be affected in turn.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:class:`~torchmetrics.classification.BinarySensitivity`, :class:`~torchmetrics.classification.MulticlassSensitivity`
and :class:`~torchmetrics.classification.MultilabelSensitivity` for the specific details of each argument influence
and examples.
Legacy Example:
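        >>> # an illustrative sketch with made-up values; the wrapper dispatches to MulticlassSensitivity here
        >>> from torch import tensor
        >>> preds = tensor([2, 1, 0, 0])
        >>> target = tensor([2, 1, 0, 1])
        >>> sensitivity = Sensitivity(task="multiclass", average="macro", num_classes=3)
        >>> sensitivity(preds, target)
        tensor(0.8333)
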
"""

    def __new__(  # type: ignore[misc]
cls,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinarySensitivity(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassSensitivity(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelSensitivity(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")


def _sensitivity_reduce(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
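    """Reduce the stat scores to sensitivity, ``tp / (tp + fn)``, aggregated according to ``average``."""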
if average == "binary":
return _safe_divide(tp, tp + fn)
if average == "micro":
tp = tp.sum(dim=0 if multidim_average == "global" else 1)
fn = fn.sum(dim=0 if multidim_average == "global" else 1)
return _safe_divide(tp, tp + fn)
sensitivity_score = _safe_divide(tp, tp + fn)
return _adjust_weights_safe_divide(sensitivity_score, average, multilabel, tp, fp, fn)