diff --git a/.github/workflows/ci-tests-full.yml b/.github/workflows/ci-tests-full.yml
index fcb1e4b638f..b7f30f1316a 100644
--- a/.github/workflows/ci-tests-full.yml
+++ b/.github/workflows/ci-tests-full.yml
@@ -79,7 +79,7 @@ jobs:
       - name: Set PyTorch version
         if: inputs.requires != 'oldest'
         run: |
-          pip install packaging
+          pip install packaging -q
           python ./requirements/adjust-versions.py requirements.txt ${{ matrix.pytorch-version }}

       - name: full chashing
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index de5db98005a..776ad865582 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -82,8 +82,12 @@ repos:
     hooks:
       - id: yesqa

-  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    rev: 'v0.0.226'
     hooks:
-      - id: flake8
-        name: PEP8
+      - id: ruff
+        # Respect `exclude` and `extend-exclude` settings.
+        args:
+          - "--fix"
+          - "--respect-gitignore"
+          - "--force-exclude"
diff --git a/pyproject.toml b/pyproject.toml
index 896b5723eb7..5e3e8927fc5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,54 @@
+[metadata]
+license_file = "LICENSE"
+description-file = "README.md"
+
 [build-system]
 requires = [
     "setuptools",
     "wheel",
 ]
+
+[tool.check-manifest]
+ignore = [
+    "*.yml",
+    ".github",
+    ".github/*"
+]
+
+
+[tool.pytest.ini_options]
+norecursedirs = [
+    ".git",
+    ".github",
+    "dist",
+    "build",
+    "docs",
+]
+addopts = [
+    "--strict-markers",
+    "--doctest-modules",
+    "--doctest-plus",
+    "--color=yes",
+    "--disable-pytest-warnings",
+]
+# ToDo
+#filterwarnings = ["error::FutureWarning"]
+xfail_strict = true
+junit_duration_report = "call"
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "pass",
+]
+
+[tool.coverage.run]
+parallel = true
+concurrency = "thread"
+relative_files = true
+
+
 [tool.black]
 # https://github.com/psf/black
 line-length = 120
@@ -20,8 +65,56 @@ skip_glob = []
 profile = "black"
 line_length = 120

-[tool.autopep8]
-ignore = ["E731"]
+
+[tool.ruff]
+line-length = 120
+# Enable Pyflakes `E` and `F` codes by default.
+select = [
+    "E", "W",  # see: https://pypi.org/project/pycodestyle
+    "F",  # see: https://pypi.org/project/pyflakes
+    # TODO
+    # "D",  # see: https://pypi.org/project/pydocstyle
+    # "N",  # see: https://pypi.org/project/pep8-naming
+]
+#extend-select = [
+#    "C4",  # see: https://pypi.org/project/flake8-comprehensions
+#    "PT",  # see: https://pypi.org/project/flake8-pytest-style
+#    "RET",  # see: https://pypi.org/project/flake8-return
+#    "SIM",  # see: https://pypi.org/project/flake8-simplify
+#]
+ignore = [
+    "E731",  # Do not assign a lambda expression, use a def
+]
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    ".eggs",
+    ".git",
+    ".mypy_cache",
+    ".ruff_cache",
+    "__pypackages__",
+    "_build",
+    "build",
+    "dist",
+    "docs"
+]
+ignore-init-module-imports = true
+
+[tool.ruff.per-file-ignores]
+"setup.py" = ["D100", "SIM115"]
+"__about__.py" = ["D100"]
+"__init__.py" = ["D100"]
+
+[tool.ruff.pydocstyle]
+# Use Google-style docstrings.
+convention = "google"
+
+#[tool.ruff.pycodestyle]
+#ignore-overlong-task-comments = true
+
+[tool.ruff.mccabe]
+# Unlike Flake8, default to a complexity level of 10.
+max-complexity = 10
+
 [tool.mypy]
 files = [
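The ruff table above carries the old flake8 behaviour over almost one-to-one; the single shared suppression is E731. As a reminder of what that rule covers, a minimal sketch (names are illustrative only):

    # E731 flags binding a lambda to a name; the `ignore = ["E731"]` entry
    # above keeps this pattern legal, matching the old flake8 configuration.
    add_one = lambda x: x + 1

    # the `def` form the rule would otherwise ask for
    def add_one_fn(x):
        return x + 1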
diff --git a/requirements/doctest.txt b/requirements/doctest.txt
index 03b896a3f9f..f89fa0d29fe 100644
--- a/requirements/doctest.txt
+++ b/requirements/doctest.txt
@@ -1,3 +1,3 @@
-pytest
-pytest-doctestplus
-pytest-rerunfailures
+pytest>=6.0.0, <7.2.0
+pytest-doctestplus>=0.9.0
+pytest-rerunfailures>=10.0
diff --git a/requirements/test.txt b/requirements/test.txt
index 95f86d0165e..b4f295d7c0c 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,5 +1,5 @@
 coverage>5.2
-pytest==6.*
+pytest>=6.0.0, <7.2.0
 pytest-cov>2.10
 # pytest-flake8
 pytest-doctestplus>=0.9.0
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index abae64d8402..00000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,58 +0,0 @@
-[tool:pytest]
-norecursedirs =
-    .git
-    .github
-    dist
-    build
-doctest_plus = enabled
-addopts =
-    --strict
-    --color=yes
-doctest_optionflags =
-    NORMALIZE_WHITESPACE
-    ELLIPSIS
-    # FLOAT_CMP
-
-[coverage:run]
-parallel = True
-concurrency = thread
-relative_files = True
-
-[coverage:report]
-exclude_lines =
-    pragma: no-cover
-    pass
-
-
-[flake8]
-max-line-length = 120
-exclude =
-    *.egg
-    build
-    temp
-select = E,W,F
-doctests = True
-verbose = 2
-# https://pep8.readthedocs.io/en/latests/intro.html#error-codes
-format = pylint
-# see: https://www.flake8rules.com/
-ignore =
-    # Do not assign a lambda expression, use a def
-    E731
-    # whitespace before ':'
-    E203
-
-
-# setup.cfg or tox.ini
-[check-manifest]
-ignore =
-    *.yml
-    .github
-    .github/*
-
-
-[metadata]
-license_file = LICENSE
-description-file = README.md
-# long_description = file:README.md
-# long_description_content_type = text/markdown
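With setup.cfg deleted, the pytest, coverage, and check-manifest settings live only in pyproject.toml. A quick hedged way to confirm the migrated tables parse (assumes Python 3.11+ for the stdlib tomllib; older interpreters would need the tomli backport):

    import tomllib  # stdlib since Python 3.11; use `tomli` before that

    with open("pyproject.toml", "rb") as fh:  # tomllib requires binary mode
        cfg = tomllib.load(fh)

    # spot-check tables that used to live in setup.cfg
    print(cfg["tool"]["pytest"]["ini_options"]["addopts"])
    print(cfg["tool"]["coverage"]["run"]["concurrency"])  # "thread"
    print(cfg["tool"]["ruff"]["ignore"])                  # ["E731"]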
""" @@ -299,11 +299,11 @@ class CatMetric(BaseAggregator): If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics import CatMetric >>> metric = CatMetric() >>> metric.update(1) - >>> metric.update(torch.tensor([2, 3])) + >>> metric.update(tensor([2, 3])) >>> metric.compute() tensor([1., 2., 3.]) """ diff --git a/src/torchmetrics/audio/pesq.py b/src/torchmetrics/audio/pesq.py index adf4fc2096e..7712196bd1b 100644 --- a/src/torchmetrics/audio/pesq.py +++ b/src/torchmetrics/audio/pesq.py @@ -62,8 +62,8 @@ class PerceptualEvaluationSpeechQuality(Metric): If ``mode`` is not either ``"wb"`` or ``"nb"`` Example: - >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality >>> import torch + >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality >>> g = torch.manual_seed(1) >>> preds = torch.randn(8000) >>> target = torch.randn(8000) diff --git a/src/torchmetrics/audio/sdr.py b/src/torchmetrics/audio/sdr.py index c6b59db82d8..efddeb42efd 100644 --- a/src/torchmetrics/audio/sdr.py +++ b/src/torchmetrics/audio/sdr.py @@ -56,8 +56,8 @@ class SignalDistortionRatio(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example: - >>> from torchmetrics.audio import SignalDistortionRatio >>> import torch + >>> from torchmetrics.audio import SignalDistortionRatio >>> g = torch.manual_seed(1) >>> preds = torch.randn(8000) >>> target = torch.randn(8000) @@ -134,10 +134,10 @@ class ScaleInvariantSignalDistortionRatio(Metric): if target and preds have a different shape Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics import ScaleInvariantSignalDistortionRatio - >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0]) + >>> target = tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = tensor([2.5, 0.0, 2.0, 8.0]) >>> si_sdr = ScaleInvariantSignalDistortionRatio() >>> si_sdr(preds, target) tensor(18.4030) diff --git a/src/torchmetrics/audio/snr.py b/src/torchmetrics/audio/snr.py index 9a14e72ebd2..c7f6db3e7ee 100644 --- a/src/torchmetrics/audio/snr.py +++ b/src/torchmetrics/audio/snr.py @@ -46,10 +46,10 @@ class SignalNoiseRatio(Metric): if target and preds have a different shape Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics import SignalNoiseRatio - >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0]) + >>> target = tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = tensor([2.5, 0.0, 2.0, 8.0]) >>> snr = SignalNoiseRatio() >>> snr(preds, target) tensor(16.1805) @@ -103,10 +103,10 @@ class ScaleInvariantSignalNoiseRatio(Metric): if target and preds have a different shape Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics import ScaleInvariantSignalNoiseRatio - >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0]) + >>> target = tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = tensor([2.5, 0.0, 2.0, 8.0]) >>> si_snr = ScaleInvariantSignalNoiseRatio() >>> si_snr(preds, target) tensor(15.0918) diff --git a/src/torchmetrics/audio/stoi.py b/src/torchmetrics/audio/stoi.py index e65bf3bffda..2446e21c322 100644 --- a/src/torchmetrics/audio/stoi.py +++ b/src/torchmetrics/audio/stoi.py @@ -58,8 +58,8 @@ class ShortTimeObjectiveIntelligibility(Metric): If ``pystoi`` package is not installed Example: - >>> from torchmetrics.audio.stoi import 
diff --git a/src/torchmetrics/audio/pesq.py b/src/torchmetrics/audio/pesq.py
index adf4fc2096e..7712196bd1b 100644
--- a/src/torchmetrics/audio/pesq.py
+++ b/src/torchmetrics/audio/pesq.py
@@ -62,8 +62,8 @@ class PerceptualEvaluationSpeechQuality(Metric):
             If ``mode`` is not either ``"wb"`` or ``"nb"``

     Example:
-        >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
         >>> import torch
+        >>> from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
         >>> g = torch.manual_seed(1)
         >>> preds = torch.randn(8000)
         >>> target = torch.randn(8000)
diff --git a/src/torchmetrics/audio/sdr.py b/src/torchmetrics/audio/sdr.py
index c6b59db82d8..efddeb42efd 100644
--- a/src/torchmetrics/audio/sdr.py
+++ b/src/torchmetrics/audio/sdr.py
@@ -56,8 +56,8 @@ class SignalDistortionRatio(Metric):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example:
-        >>> from torchmetrics.audio import SignalDistortionRatio
         >>> import torch
+        >>> from torchmetrics.audio import SignalDistortionRatio
         >>> g = torch.manual_seed(1)
         >>> preds = torch.randn(8000)
         >>> target = torch.randn(8000)
@@ -134,10 +134,10 @@ class ScaleInvariantSignalDistortionRatio(Metric):
             if target and preds have a different shape

     Example:
-        >>> import torch
+        >>> from torch import tensor
         >>> from torchmetrics import ScaleInvariantSignalDistortionRatio
-        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> target = tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = tensor([2.5, 0.0, 2.0, 8.0])
         >>> si_sdr = ScaleInvariantSignalDistortionRatio()
         >>> si_sdr(preds, target)
         tensor(18.4030)
diff --git a/src/torchmetrics/audio/snr.py b/src/torchmetrics/audio/snr.py
index 9a14e72ebd2..c7f6db3e7ee 100644
--- a/src/torchmetrics/audio/snr.py
+++ b/src/torchmetrics/audio/snr.py
@@ -46,10 +46,10 @@ class SignalNoiseRatio(Metric):
             if target and preds have a different shape

     Example:
-        >>> import torch
+        >>> from torch import tensor
         >>> from torchmetrics import SignalNoiseRatio
-        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> target = tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = tensor([2.5, 0.0, 2.0, 8.0])
         >>> snr = SignalNoiseRatio()
         >>> snr(preds, target)
         tensor(16.1805)
@@ -103,10 +103,10 @@ class ScaleInvariantSignalNoiseRatio(Metric):
             if target and preds have a different shape

     Example:
-        >>> import torch
+        >>> from torch import tensor
         >>> from torchmetrics import ScaleInvariantSignalNoiseRatio
-        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
-        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
+        >>> target = tensor([3.0, -0.5, 2.0, 7.0])
+        >>> preds = tensor([2.5, 0.0, 2.0, 8.0])
         >>> si_snr = ScaleInvariantSignalNoiseRatio()
         >>> si_snr(preds, target)
         tensor(15.0918)
diff --git a/src/torchmetrics/audio/stoi.py b/src/torchmetrics/audio/stoi.py
index e65bf3bffda..2446e21c322 100644
--- a/src/torchmetrics/audio/stoi.py
+++ b/src/torchmetrics/audio/stoi.py
@@ -58,8 +58,8 @@ class ShortTimeObjectiveIntelligibility(Metric):
             If ``pystoi`` package is not installed

     Example:
-        >>> from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
         >>> import torch
+        >>> from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
         >>> g = torch.manual_seed(1)
         >>> preds = torch.randn(8000)
         >>> target = torch.randn(8000)
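The SNR doctest value is easy to verify by hand from SNR = 10*log10(||target||^2 / ||target - preds||^2); a small check reproducing tensor(16.1805):

    from torch import log10, tensor

    target = tensor([3.0, -0.5, 2.0, 7.0])
    preds = tensor([2.5, 0.0, 2.0, 8.0])
    noise = target - preds  # [0.5, -0.5, 0.0, -1.0]
    print(10 * log10(target.pow(2).sum() / noise.pow(2).sum()))
    # tensor(16.1805), i.e. 10 * log10(62.25 / 1.5)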
diff --git a/src/torchmetrics/classification/accuracy.py b/src/torchmetrics/classification/accuracy.py
index dfbfffada43..8cc4c055b22 100644
--- a/src/torchmetrics/classification/accuracy.py
+++ b/src/torchmetrics/classification/accuracy.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional, Sequence, Union

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -69,30 +68,27 @@ class BinaryAccuracy(BinaryStatScores):
             is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryAccuracy
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0, 0, 1, 1, 0, 1])
         >>> metric = BinaryAccuracy()
         >>> metric(preds, target)
         tensor(0.6667)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryAccuracy
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
         >>> metric = BinaryAccuracy()
         >>> metric(preds, target)
         tensor(0.6667)

     Example (multidim tensors):
         >>> from torchmetrics.classification import BinaryAccuracy
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = BinaryAccuracy(multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.3333, 0.1667])
@@ -130,23 +126,23 @@ def plot(
         .. plot::
             :scale: 75

+            >>> from torch import rand, randint
             >>> # Example plotting a single value
-            >>> import torch
             >>> from torchmetrics.classification import BinaryAccuracy
             >>> metric = BinaryAccuracy()
-            >>> metric.update(torch.rand(10), torch.randint(2,(10,)))
+            >>> metric.update(rand(10), randint(2,(10,)))
             >>> fig_, ax_ = metric.plot()

         .. plot::
             :scale: 75

+            >>> from torch import rand, randint
             >>> # Example plotting multiple values
-            >>> import torch
             >>> from torchmetrics.classification import BinaryAccuracy
             >>> metric = BinaryAccuracy()
             >>> values = [ ]
             >>> for _ in range(10):
-            ...     values.append(metric(torch.rand(10), torch.randint(2,(10,))))
+            ...     values.append(metric(rand(10), randint(2,(10,))))
             >>> fig_, ax_ = metric.plot(values)
         """
         val = val or self.compute()
@@ -213,9 +209,10 @@ class MulticlassAccuracy(MulticlassStatScores):
             - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassAccuracy
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassAccuracy(num_classes=3)
         >>> metric(preds, target)
         tensor(0.8333)
@@ -225,13 +222,11 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MulticlassAccuracy
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassAccuracy(num_classes=3)
         >>> metric(preds, target)
         tensor(0.8333)
@@ -241,8 +236,8 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MulticlassAccuracy
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassAccuracy(num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.5000, 0.2778])
@@ -284,23 +279,23 @@ def plot(
         .. plot::
             :scale: 75

+            >>> from torch import randint
             >>> # Example plotting a single value per class
-            >>> import torch
             >>> from torchmetrics.classification import MulticlassAccuracy
             >>> metric = MulticlassAccuracy(num_classes=3, average=None)
-            >>> metric.update(torch.randint(3, (20,)), torch.randint(3, (20,)))
+            >>> metric.update(randint(3, (20,)), randint(3, (20,)))
             >>> fig_, ax_ = metric.plot()

         .. plot::
             :scale: 75

+            >>> from torch import randint
             >>> # Example plotting a multiple values per class
-            >>> import torch
             >>> from torchmetrics.classification import MulticlassAccuracy
             >>> metric = MulticlassAccuracy(num_classes=3, average=None)
             >>> values = []
             >>> for _ in range(20):
-            ...     values.append(metric(torch.randint(3, (20,)), torch.randint(3, (20,))))
+            ...     values.append(metric(randint(3, (20,)), randint(3, (20,))))
             >>> fig_, ax_ = metric.plot(values)
         """
         val = val or self.compute()
@@ -365,9 +360,10 @@ class MultilabelAccuracy(MultilabelStatScores):
             - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelAccuracy
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelAccuracy(num_labels=3)
         >>> metric(preds, target)
         tensor(0.6667)
@@ -377,8 +373,8 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelAccuracy
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelAccuracy(num_labels=3)
         >>> metric(preds, target)
         tensor(0.6667)
@@ -388,8 +384,8 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MultilabelAccuracy
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor(
         ...     [
         ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
         ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
@@ -430,15 +426,15 @@ class Accuracy:
         each argument influence and examples.

     Legacy Example:
-        >>> import torch
-        >>> target = torch.tensor([0, 1, 2, 3])
-        >>> preds = torch.tensor([0, 2, 1, 3])
+        >>> from torch import tensor
+        >>> target = tensor([0, 1, 2, 3])
+        >>> preds = tensor([0, 2, 1, 3])
         >>> accuracy = Accuracy(task="multiclass", num_classes=4)
         >>> accuracy(preds, target)
         tensor(0.5000)

-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
+        >>> target = tensor([0, 1, 2])
+        >>> preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
         >>> accuracy = Accuracy(task="multiclass", num_classes=3, top_k=2)
         >>> accuracy(preds, target)
         tensor(0.6667)
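The first BinaryAccuracy value can be read straight off the inputs: the predictions at indices 0, 3, 4 and 5 match their targets, so the score is 4/6. A standalone check:

    from torch import tensor
    from torchmetrics.classification import BinaryAccuracy

    target = tensor([0, 1, 0, 1, 0, 1])
    preds = tensor([0, 0, 1, 1, 0, 1])  # 4 of 6 positions agree
    acc = BinaryAccuracy()(preds, target)
    assert abs(acc.item() - 4 / 6) < 1e-4  # tensor(0.6667)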
diff --git a/src/torchmetrics/classification/auroc.py b/src/torchmetrics/classification/auroc.py
index c59db416f8b..7c7be29be68 100644
--- a/src/torchmetrics/classification/auroc.py
+++ b/src/torchmetrics/classification/auroc.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, List, Optional, Union

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -77,9 +76,10 @@ class BinaryAUROC(BinaryPrecisionRecallCurve):
         A single scalar with the auroc score

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryAUROC
-        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
-        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> preds = tensor([0, 0.5, 0.7, 0.8])
+        >>> target = tensor([0, 1, 1, 0])
         >>> metric = BinaryAUROC(thresholds=None)
         >>> metric(preds, target)
         tensor(0.5000)
@@ -163,12 +163,13 @@ class MulticlassAUROC(MulticlassPrecisionRecallCurve):
         If `average="macro"|"weighted"` then a single scalar is returned.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassAUROC
-        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                 [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                 [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                 [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = tensor([0, 1, 3, 2])
         >>> metric = MulticlassAUROC(num_classes=5, average="macro", thresholds=None)
         >>> metric(preds, target)
         tensor(0.5333)
@@ -263,12 +264,13 @@ class MultilabelAUROC(MultilabelPrecisionRecallCurve):
         If `average="micro|macro"|"weighted"` then a single scalar is returned.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelAUROC
-        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
+        >>> preds = tensor([[0.75, 0.05, 0.35],
         ...                     [0.45, 0.75, 0.05],
         ...                     [0.05, 0.55, 0.75],
         ...                     [0.05, 0.65, 0.05]])
-        >>> target = torch.tensor([[1, 0, 1],
+        >>> target = tensor([[1, 0, 1],
         ...                     [0, 0, 0],
         ...                     [0, 1, 1],
         ...                     [1, 1, 1]])
@@ -325,18 +327,19 @@ class AUROC:
         each argument influence and examples.

     Legacy Example:
-        >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
-        >>> target = torch.tensor([0, 0, 1, 1, 1])
+        >>> from torch import tensor
+        >>> preds = tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+        >>> target = tensor([0, 0, 1, 1, 1])
         >>> auroc = AUROC(task="binary")
         >>> auroc(preds, target)
         tensor(0.5000)

-        >>> preds = torch.tensor([[0.90, 0.05, 0.05],
+        >>> preds = tensor([[0.90, 0.05, 0.05],
         ...                     [0.05, 0.90, 0.05],
         ...                     [0.05, 0.05, 0.90],
         ...                     [0.85, 0.05, 0.10],
         ...                     [0.10, 0.10, 0.80]])
-        >>> target = torch.tensor([0, 1, 1, 2, 2])
+        >>> target = tensor([0, 1, 1, 2, 2])
         >>> auroc = AUROC(task="multiclass", num_classes=3)
         >>> auroc(preds, target)
         tensor(0.7778)
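For the binary case, AUROC equals the probability that a randomly chosen positive outscores a randomly chosen negative. In the BinaryAUROC example the positives score {0.5, 0.7} and the negatives {0.0, 0.8}, so two of the four pairs are ordered correctly. A sketch of that counting argument (ties, absent here, would count half):

    from itertools import product

    pos = [0.5, 0.7]  # scores where target == 1
    neg = [0.0, 0.8]  # scores where target == 0
    wins = sum(p > n for p, n in product(pos, neg))
    print(wins / (len(pos) * len(neg)))  # 0.5, matching tensor(0.5000)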
diff --git a/src/torchmetrics/classification/average_precision.py b/src/torchmetrics/classification/average_precision.py
index a04fd82e901..529c8b8d836 100644
--- a/src/torchmetrics/classification/average_precision.py
+++ b/src/torchmetrics/classification/average_precision.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, List, Optional, Union

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -80,9 +79,10 @@ class BinaryAveragePrecision(BinaryPrecisionRecallCurve):
         A single scalar with the average precision score

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryAveragePrecision
-        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
-        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> preds = tensor([0, 0.5, 0.7, 0.8])
+        >>> target = tensor([0, 1, 1, 0])
         >>> metric = BinaryAveragePrecision(thresholds=None)
         >>> metric(preds, target)
         tensor(0.5833)
@@ -157,12 +157,13 @@ class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve):
         If `average="macro"|"weighted"` then a single scalar is returned.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassAveragePrecision
-        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                 [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                 [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                 [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = tensor([0, 1, 3, 2])
         >>> metric = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=None)
         >>> metric(preds, target)
         tensor(0.6250)
@@ -262,15 +263,16 @@ class MultilabelAveragePrecision(MultilabelPrecisionRecallCurve):
         If `average="micro|macro"|"weighted"` then a single scalar is returned.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelAveragePrecision
-        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
-        ...                       [0.45, 0.75, 0.05],
-        ...                       [0.05, 0.55, 0.75],
-        ...                       [0.05, 0.65, 0.05]])
-        >>> target = torch.tensor([[1, 0, 1],
-        ...                        [0, 0, 0],
-        ...                        [0, 1, 1],
-        ...                        [1, 1, 1]])
+        >>> preds = tensor([[0.75, 0.05, 0.35],
+        ...                 [0.45, 0.75, 0.05],
+        ...                 [0.05, 0.55, 0.75],
+        ...                 [0.05, 0.65, 0.05]])
+        >>> target = tensor([[1, 0, 1],
+        ...                  [0, 0, 0],
+        ...                  [0, 1, 1],
+        ...                  [1, 1, 1]])
         >>> metric = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=None)
         >>> metric(preds, target)
         tensor(0.7500)
@@ -331,17 +333,18 @@ class AveragePrecision:
         for the specific details of each argument influence and examples.

     Legacy Example:
-        >>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
-        >>> target = torch.tensor([0, 1, 1, 1])
+        >>> from torch import tensor
+        >>> pred = tensor([0, 0.1, 0.8, 0.4])
+        >>> target = tensor([0, 1, 1, 1])
         >>> average_precision = AveragePrecision(task="binary")
         >>> average_precision(pred, target)
         tensor(1.)

-        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
-        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
-        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
-        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
-        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> pred = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = tensor([0, 1, 3, 2])
         >>> average_precision = AveragePrecision(task="multiclass", num_classes=5, average=None)
         >>> average_precision(pred, target)
         tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
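The binary value tensor(0.5833) follows from AP = sum_n (R_n - R_{n-1}) * P_n over descending score thresholds: recall first rises to 0.5 at precision 1/2 (threshold 0.7), then to 1.0 at precision 2/3 (threshold 0.5). Numerically:

    precisions = [1 / 2, 2 / 3]   # precision at the two recall steps
    recall_deltas = [0.5, 0.5]    # recall gained at each step
    ap = sum(dr * p for dr, p in zip(recall_deltas, precisions))
    print(round(ap, 4))           # 0.5833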
diff --git a/src/torchmetrics/classification/calibration_error.py b/src/torchmetrics/classification/calibration_error.py
index c1464b9540f..76db7b97d2b 100644
--- a/src/torchmetrics/classification/calibration_error.py
+++ b/src/torchmetrics/classification/calibration_error.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -72,9 +71,10 @@ class BinaryCalibrationError(Metric):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryCalibrationError
-        >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
-        >>> target = torch.tensor([0, 0, 1, 1, 1])
+        >>> preds = tensor([0.25, 0.25, 0.55, 0.75, 0.75])
+        >>> target = tensor([0, 0, 1, 1, 1])
         >>> metric = BinaryCalibrationError(n_bins=2, norm='l1')
         >>> metric(preds, target)
         tensor(0.2900)
@@ -164,12 +164,13 @@ class MulticlassCalibrationError(Metric):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example:
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassCalibrationError
-        >>> preds = torch.tensor([[0.25, 0.20, 0.55],
-        ...                       [0.55, 0.05, 0.40],
-        ...                       [0.10, 0.30, 0.60],
-        ...                       [0.90, 0.05, 0.05]])
-        >>> target = torch.tensor([0, 1, 2, 0])
+        >>> preds = tensor([[0.25, 0.20, 0.55],
+        ...                 [0.55, 0.05, 0.40],
+        ...                 [0.10, 0.30, 0.60],
+        ...                 [0.90, 0.05, 0.05]])
+        >>> target = tensor([0, 1, 2, 0])
         >>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1')
         >>> metric(preds, target)
         tensor(0.2000)
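The binary value tensor(0.2900) is the expected calibration error. With n_bins=2, preds {0.25, 0.25} fall in the low bin (mean confidence 0.25, accuracy 0) and {0.55, 0.75, 0.75} in the high bin (mean confidence ~0.6833, accuracy 1):

    low = (2 / 5) * abs(0.25 - 0.0)       # bin weight * |confidence - accuracy|
    high_conf = (0.55 + 0.75 + 0.75) / 3
    high = (3 / 5) * abs(high_conf - 1.0)
    print(round(low + high, 4))           # 0.29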
diff --git a/src/torchmetrics/classification/cohen_kappa.py b/src/torchmetrics/classification/cohen_kappa.py
index 70679526ce1..1da541d6771 100644
--- a/src/torchmetrics/classification/cohen_kappa.py
+++ b/src/torchmetrics/classification/cohen_kappa.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -62,17 +61,18 @@ class labels.
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryCohenKappa
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> metric = BinaryCohenKappa()
         >>> metric(preds, target)
         tensor(0.5000)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryCohenKappa
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0.35, 0.85, 0.48, 0.01])
         >>> metric = BinaryCohenKappa()
         >>> metric(preds, target)
         tensor(0.5000)
@@ -135,22 +135,21 @@ class labels.
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (pred is integer tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassCohenKappa
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassCohenKappa(num_classes=3)
         >>> metric(preds, target)
         tensor(0.6364)

     Example (pred is float tensor):
         >>> from torchmetrics.classification import MulticlassCohenKappa
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassCohenKappa(num_classes=3)
         >>> metric(preds, target)
         tensor(0.6364)
@@ -194,8 +193,9 @@ class labels.
         each argument influence and examples.

     Legacy Example:
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> from torch import tensor
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> cohenkappa = CohenKappa(task="multiclass", num_classes=2)
         >>> cohenkappa(preds, target)
         tensor(0.5000)
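The binary value tensor(0.5000) drops out of kappa = (p_o - p_e) / (1 - p_e): observed agreement p_o = 3/4, and chance agreement from the marginals (two 0s and two 1s in target, three 0s and one 1 in preds) is p_e = (2*3 + 2*1)/16 = 1/2:

    p_o = 3 / 4                      # 3 of 4 labels agree
    p_e = (2 * 3 + 2 * 1) / 16       # chance agreement from marginals
    print((p_o - p_e) / (1 - p_e))   # 0.5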
diff --git a/src/torchmetrics/classification/confusion_matrix.py b/src/torchmetrics/classification/confusion_matrix.py
index 8c8ac7482b5..ff582bdf70e 100644
--- a/src/torchmetrics/classification/confusion_matrix.py
+++ b/src/torchmetrics/classification/confusion_matrix.py
@@ -156,9 +156,10 @@ class MulticlassConfusionMatrix(Metric):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (pred is integer tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassConfusionMatrix
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassConfusionMatrix(num_classes=3)
         >>> metric(preds, target)
         tensor([[1, 1, 0],
                 [0, 1, 0],
                 [0, 0, 1]])
@@ -167,13 +168,11 @@
     Example (pred is float tensor):
         >>> from torchmetrics.classification import MulticlassConfusionMatrix
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassConfusionMatrix(num_classes=3)
         >>> metric(preds, target)
         tensor([[1, 1, 0],
                 [0, 1, 0],
                 [0, 0, 1]])
@@ -232,10 +231,10 @@ def plot(self, val: Optional[Tensor] = None) -> _PLOT_OUT_TYPE:
         .. plot::
             :scale: 75

-            >>> import torch
+            >>> from torch import randint
             >>> from torchmetrics.classification import MulticlassConfusionMatrix
             >>> metric = MulticlassConfusionMatrix(num_classes=5)
-            >>> metric.update(torch.randint(5, (20,)), torch.randint(5, (20,)))
+            >>> metric.update(randint(5, (20,)), randint(5, (20,)))
             >>> fig_, ax_ = metric.plot()
         """
         val = val or self.compute()
@@ -277,9 +276,10 @@ class MultilabelConfusionMatrix(Metric):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelConfusionMatrix
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelConfusionMatrix(num_labels=3)
         >>> metric(preds, target)
         tensor([[[1, 0], [0, 1]],
@@ -288,8 +288,8 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelConfusionMatrix
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelConfusionMatrix(num_labels=3)
         >>> metric(preds, target)
         tensor([[[1, 0], [0, 1]],
@@ -344,23 +344,24 @@ class ConfusionMatrix:
         the specific details of each argument influence and examples.

     Legacy Example:
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> from torch import tensor
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> confmat = ConfusionMatrix(task="binary", num_classes=2)
         >>> confmat(preds, target)
         tensor([[2, 0],
                 [1, 1]])

-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3)
         >>> confmat(preds, target)
         tensor([[1, 1, 0],
                 [0, 1, 0],
                 [0, 0, 1]])

-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3)
         >>> confmat(preds, target)
         tensor([[[1, 0], [0, 1]],
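In the binary legacy example the rows of the matrix index the true class and the columns the predicted class: both true 0s are predicted 0 (row [2, 0]) while the two true 1s split one-and-one (row [1, 1]). The same result via the task-specific class:

    from torch import tensor
    from torchmetrics.classification import BinaryConfusionMatrix

    # rows = true class, columns = predicted class
    confmat = BinaryConfusionMatrix()(tensor([0, 1, 0, 0]), tensor([1, 1, 0, 0]))
    print(confmat)  # tensor([[2, 0], [1, 1]])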
diff --git a/src/torchmetrics/classification/dice.py b/src/torchmetrics/classification/dice.py
index 6bcae6247d8..9ab5cd9923f 100644
--- a/src/torchmetrics/classification/dice.py
+++ b/src/torchmetrics/classification/dice.py
@@ -119,10 +119,10 @@ class Dice(Metric):
             If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.

     Example:
-        >>> import torch
+        >>> from torch import tensor
         >>> from torchmetrics import Dice
-        >>> preds = torch.tensor([2, 0, 2, 1])
-        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> preds = tensor([2, 0, 2, 1])
+        >>> target = tensor([1, 1, 2, 0])
         >>> dice = Dice(average='micro')
         >>> dice(preds, target)
         tensor(0.2500)
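The micro-averaged Dice score counts a single correct prediction (the class-2 hit at index 2), so tp=1 against fp=fn=3 and Dice = 2*tp / (2*tp + fp + fn) = 2/8:

    tp, fp, fn = 1, 3, 3                # from preds=[2, 0, 2, 1] vs target=[1, 1, 2, 0]
    print(2 * tp / (2 * tp + fp + fn))  # 0.25, matching tensor(0.2500)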
diff --git a/src/torchmetrics/classification/exact_match.py b/src/torchmetrics/classification/exact_match.py
index be9dc446a28..098aeb3cb5b 100644
--- a/src/torchmetrics/classification/exact_match.py
+++ b/src/torchmetrics/classification/exact_match.py
@@ -69,17 +69,18 @@ class MulticlassExactMatch(Metric):
         - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``

     Example (multidim tensors):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassExactMatch
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassExactMatch(num_classes=3, multidim_average='global')
         >>> metric(preds, target)
         tensor(0.5000)

     Example (multidim tensors):
         >>> from torchmetrics.classification import MulticlassExactMatch
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassExactMatch(num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([1., 0.])
@@ -171,30 +172,27 @@ class MultilabelExactMatch(Metric):
         - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelExactMatch
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelExactMatch(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5000)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelExactMatch
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelExactMatch(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5000)

     Example (multidim tensors):
         >>> from torchmetrics.classification import MultilabelExactMatch
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = MultilabelExactMatch(num_labels=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0., 0.])
@@ -266,15 +264,16 @@ class ExactMatch:
         :mod:`MulticlassExactMatch` and :mod:`MultilabelExactMatch` for the specific details of each argument
         influence and examples.

-    Legacy Example:
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
+    Legacy Example:
+        >>> from torch import tensor
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = ExactMatch(task="multiclass", num_classes=3, multidim_average='global')
         >>> metric(preds, target)
         tensor(0.5000)

-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = ExactMatch(task="multiclass", num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([1., 0.])
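The two multidim settings differ only in the reduction: sample 0 matches its target everywhere and sample 1 does not, so 'samplewise' reports the per-sample flags and 'global' averages them:

    from torch import tensor

    samplewise = tensor([1.0, 0.0])  # per-sample exact-match flags from the example
    print(samplewise.mean())         # tensor(0.5000), the 'global' result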
diff --git a/src/torchmetrics/classification/f_beta.py b/src/torchmetrics/classification/f_beta.py
index 49fd39005c5..04fab578563 100644
--- a/src/torchmetrics/classification/f_beta.py
+++ b/src/torchmetrics/classification/f_beta.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -64,30 +63,27 @@ class BinaryFBetaScore(BinaryStatScores):
             is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryFBetaScore
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0, 0, 1, 1, 0, 1])
         >>> metric = BinaryFBetaScore(beta=2.0)
         >>> metric(preds, target)
         tensor(0.6667)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryFBetaScore
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
         >>> metric = BinaryFBetaScore(beta=2.0)
         >>> metric(preds, target)
         tensor(0.6667)

     Example (multidim tensors):
         >>> from torchmetrics.classification import BinaryFBetaScore
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = BinaryFBetaScore(beta=2.0, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.5882, 0.0000])
@@ -179,9 +175,10 @@ class MulticlassFBetaScore(MulticlassStatScores):
         - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassFBetaScore
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
         >>> metric(preds, target)
         tensor(0.7963)
@@ -191,13 +188,11 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MulticlassFBetaScore
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
         >>> metric(preds, target)
         tensor(0.7963)
@@ -207,8 +202,8 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MulticlassFBetaScore
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.4697, 0.2706])
@@ -306,9 +301,10 @@ class MultilabelFBetaScore(MultilabelStatScores):
         - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelFBetaScore
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
         >>> metric(preds, target)
         tensor(0.6111)
@@ -318,8 +314,8 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelFBetaScore
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
         >>> metric(preds, target)
         tensor(0.6111)
@@ -329,13 +325,9 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MultilabelFBetaScore
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.5556, 0.0000])
@@ -413,30 +405,27 @@ class BinaryF1Score(BinaryFBetaScore):
             is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryF1Score
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryF1Score()
        >>> metric(preds, target)
        tensor(0.6667)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryF1Score
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
         >>> metric = BinaryF1Score()
         >>> metric(preds, target)
         tensor(0.6667)

     Example (multidim tensors):
         >>> from torchmetrics.classification import BinaryF1Score
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = BinaryF1Score(multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.5000, 0.0000])
@@ -519,9 +508,10 @@ class MulticlassF1Score(MulticlassFBetaScore):
         - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassF1Score
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassF1Score(num_classes=3)
         >>> metric(preds, target)
         tensor(0.7778)
@@ -531,13 +521,11 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MulticlassF1Score
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassF1Score(num_classes=3)
         >>> metric(preds, target)
         tensor(0.7778)
@@ -547,8 +535,8 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MulticlassF1Score
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassF1Score(num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.4333, 0.2667])
@@ -636,9 +624,10 @@ class MultilabelF1Score(MultilabelFBetaScore):
         - If ``average=None/'none'``, the shape will be ``(N, C)```

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelF1Score
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelF1Score(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5556)
@@ -648,8 +637,8 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelF1Score
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelF1Score(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5556)
@@ -659,13 +648,9 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MultilabelF1Score
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = MultilabelF1Score(num_labels=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.4444, 0.0000])
@@ -713,9 +698,9 @@ class FBetaScore:
         details of each argument influence and examples.

     Legcy Example:
-        >>> import torch
-        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
-        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
+        >>> from torch import tensor
+        >>> target = tensor([0, 1, 2, 0, 1, 2])
+        >>> preds = tensor([0, 2, 1, 0, 0, 1])
         >>> f_beta = FBetaScore(task="multiclass", num_classes=3, beta=0.5)
         >>> f_beta(preds, target)
         tensor(0.3333)
@@ -763,9 +748,9 @@ class F1Score:
         details of each argument influence and examples.

     Legacy Example:
-        >>> import torch
-        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
-        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
+        >>> from torch import tensor
+        >>> target = tensor([0, 1, 2, 0, 1, 2])
+        >>> preds = tensor([0, 2, 1, 0, 0, 1])
         >>> f1 = F1Score(task="multiclass", num_classes=3)
         >>> f1(preds, target)
         tensor(0.3333)
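Both legacy outputs of tensor(0.3333) are consistent with micro averaging (the legacy wrappers' historical default), under which F-beta collapses to plain accuracy for multiclass inputs regardless of beta: only the predictions at indices 0 and 3 are correct. A hedged check of that arithmetic:

    target = [0, 1, 2, 0, 1, 2]
    preds = [0, 2, 1, 0, 0, 1]
    correct = sum(p == t for p, t in zip(preds, target))
    print(correct / len(target))  # 0.3333..., matching both doctests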
diff --git a/src/torchmetrics/classification/hamming.py b/src/torchmetrics/classification/hamming.py
index cbb7a5ce987..a996db6c56b 100644
--- a/src/torchmetrics/classification/hamming.py
+++ b/src/torchmetrics/classification/hamming.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -61,30 +60,27 @@ class BinaryHammingDistance(BinaryStatScores):
             is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryHammingDistance
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0, 0, 1, 1, 0, 1])
         >>> metric = BinaryHammingDistance()
         >>> metric(preds, target)
         tensor(0.3333)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryHammingDistance
-        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
-        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
+        >>> target = tensor([0, 1, 0, 1, 0, 1])
+        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
         >>> metric = BinaryHammingDistance()
         >>> metric(preds, target)
         tensor(0.3333)

     Example (multidim tensors):
         >>> from torchmetrics.classification import BinaryHammingDistance
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = BinaryHammingDistance(multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.6667, 0.8333])
@@ -157,9 +153,10 @@ class MulticlassHammingDistance(MulticlassStatScores):
         - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassHammingDistance
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassHammingDistance(num_classes=3)
         >>> metric(preds, target)
         tensor(0.1667)
@@ -169,13 +166,11 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MulticlassHammingDistance
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassHammingDistance(num_classes=3)
         >>> metric(preds, target)
         tensor(0.1667)
@@ -185,8 +180,8 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MulticlassHammingDistance
-        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
-        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
+        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
+        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
         >>> metric = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.5000, 0.7222])
@@ -262,9 +257,10 @@ class MultilabelHammingDistance(MultilabelStatScores):
         - If ``average=None/'none'``, the shape will be ``(N, C)``

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelHammingDistance
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelHammingDistance(num_labels=3)
         >>> metric(preds, target)
         tensor(0.3333)
@@ -274,8 +270,8 @@
     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelHammingDistance
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelHammingDistance(num_labels=3)
         >>> metric(preds, target)
         tensor(0.3333)
@@ -285,13 +281,9 @@
     Example (multidim tensors):
         >>> from torchmetrics.classification import MultilabelHammingDistance
-        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
-        >>> preds = torch.tensor(
-        ...     [
-        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
-        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
-        ...     ]
-        ... )
+        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
         >>> metric = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise')
         >>> metric(preds, target)
         tensor([0.6667, 0.8333])
@@ -328,8 +320,9 @@ class HammingDistance:
         specific details of each argument influence and examples.

     Legacy Example:
-        >>> target = torch.tensor([[0, 1], [1, 1]])
-        >>> preds = torch.tensor([[0, 1], [0, 1]])
+        >>> from torch import tensor
+        >>> target = tensor([[0, 1], [1, 1]])
+        >>> preds = tensor([[0, 1], [0, 1]])
         >>> hamming_distance = HammingDistance(task="multilabel", num_labels=2)
         >>> hamming_distance(preds, target)
         tensor(0.2500)
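The legacy multilabel value tensor(0.2500) is just the fraction of flipped labels: one of the four label slots disagrees:

    from torch import tensor

    target = tensor([[0, 1], [1, 1]])
    preds = tensor([[0, 1], [0, 1]])
    print((preds != target).float().mean())  # tensor(0.2500)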
diff --git a/src/torchmetrics/classification/hinge.py b/src/torchmetrics/classification/hinge.py
index d76283f7457..f7fdb56849f 100644
--- a/src/torchmetrics/classification/hinge.py
+++ b/src/torchmetrics/classification/hinge.py
@@ -210,21 +210,21 @@ class HingeLoss:
         each argument influence and examples.

     Legacy Example:
-        >>> import torch
-        >>> target = torch.tensor([0, 1, 1])
-        >>> preds = torch.tensor([0.5, 0.7, 0.1])
+        >>> from torch import tensor
+        >>> target = tensor([0, 1, 1])
+        >>> preds = tensor([0.5, 0.7, 0.1])
         >>> hinge = HingeLoss(task="binary")
         >>> hinge(preds, target)
         tensor(0.9000)

-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> target = tensor([0, 1, 2])
+        >>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
         >>> hinge = HingeLoss(task="multiclass", num_classes=3)
         >>> hinge(preds, target)
         tensor(1.5551)

-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+        >>> target = tensor([0, 1, 2])
+        >>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
         >>> hinge = HingeLoss(task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
         >>> hinge(preds, target)
         tensor([1.3743, 1.1945, 1.2359])
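The binary value tensor(0.9000) can be reproduced by hand: mapping targets {0, 1} to {-1, +1} gives margins y*f of [-0.5, 0.7, 0.1], per-sample losses max(0, 1 - y*f) of [1.5, 0.3, 0.9], and mean 0.9:

    margins = [-0.5, 0.7, 0.1]                     # y * f with y in {-1, +1}
    losses = [max(0.0, 1.0 - m) for m in margins]  # [1.5, 0.3, 0.9]
    print(sum(losses) / len(losses))               # 0.9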
diff --git a/src/torchmetrics/classification/jaccard.py b/src/torchmetrics/classification/jaccard.py
index c966fb63343..1ab068b5dca 100644
--- a/src/torchmetrics/classification/jaccard.py
+++ b/src/torchmetrics/classification/jaccard.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -52,17 +51,18 @@ class BinaryJaccardIndex(BinaryConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryJaccardIndex
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> metric = BinaryJaccardIndex()
         >>> metric(preds, target)
         tensor(0.5000)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryJaccardIndex
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0.35, 0.85, 0.48, 0.01])
         >>> metric = BinaryJaccardIndex()
         >>> metric(preds, target)
         tensor(0.5000)
@@ -120,22 +120,21 @@ class MulticlassJaccardIndex(MulticlassConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (pred is integer tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassJaccardIndex
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassJaccardIndex(num_classes=3)
         >>> metric(preds, target)
         tensor(0.6667)

     Example (pred is float tensor):
         >>> from torchmetrics.classification import MulticlassJaccardIndex
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassJaccardIndex(num_classes=3)
         >>> metric(preds, target)
         tensor(0.6667)
@@ -200,17 +199,18 @@ class MultilabelJaccardIndex(MultilabelConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelJaccardIndex
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelJaccardIndex(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5000)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelJaccardIndex
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelJaccardIndex(num_labels=3)
         >>> metric(preds, target)
         tensor(0.5000)
@@ -260,8 +260,9 @@ class JaccardIndex:
         the specific details of each argument influence and examples.

     Legacy Example:
-        >>> target = torch.randint(0, 2, (10, 25, 25))
-        >>> pred = torch.tensor(target)
+        >>> from torch import randint, tensor
+        >>> target = randint(0, 2, (10, 25, 25))
+        >>> pred = tensor(target)
         >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
         >>> jaccard = JaccardIndex(task="multiclass", num_classes=2)
         >>> jaccard(pred, target)
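The binary value tensor(0.5000) is intersection over union of the positive class: one true positive, no false positives, one false negative:

    tp, fp, fn = 1, 0, 1        # preds=[0, 1, 0, 0] vs target=[1, 1, 0, 0]
    print(tp / (tp + fp + fn))  # 0.5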
diff --git a/src/torchmetrics/classification/matthews_corrcoef.py b/src/torchmetrics/classification/matthews_corrcoef.py
index 5b0a56f3ae3..7883916193b 100644
--- a/src/torchmetrics/classification/matthews_corrcoef.py
+++ b/src/torchmetrics/classification/matthews_corrcoef.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal

@@ -44,17 +43,18 @@ class BinaryMatthewsCorrCoef(BinaryConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import BinaryMatthewsCorrCoef
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> metric = BinaryMatthewsCorrCoef()
         >>> metric(preds, target)
         tensor(0.5774)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import BinaryMatthewsCorrCoef
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0.35, 0.85, 0.48, 0.01])
         >>> metric = BinaryMatthewsCorrCoef()
         >>> metric(preds, target)
         tensor(0.5774)
@@ -99,22 +99,21 @@ class MulticlassMatthewsCorrCoef(MulticlassConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (pred is integer tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([2, 1, 0, 1])
         >>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
         >>> metric(preds, target)
         tensor(0.7000)

     Example (pred is float tensor):
         >>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
-        >>> target = torch.tensor([2, 1, 0, 0])
-        >>> preds = torch.tensor([
-        ...     [0.16, 0.26, 0.58],
-        ...     [0.22, 0.61, 0.17],
-        ...     [0.71, 0.09, 0.20],
-        ...     [0.05, 0.82, 0.13],
-        ... ])
+        >>> target = tensor([2, 1, 0, 0])
+        >>> preds = tensor([[0.16, 0.26, 0.58],
+        ...                 [0.22, 0.61, 0.17],
+        ...                 [0.71, 0.09, 0.20],
+        ...                 [0.05, 0.82, 0.13]])
         >>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
         >>> metric(preds, target)
         tensor(0.7000)
@@ -160,17 +159,18 @@ class MultilabelMatthewsCorrCoef(MultilabelConfusionMatrix):
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

     Example (preds is int tensor):
+        >>> from torch import tensor
         >>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
         >>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
         >>> metric(preds, target)
         tensor(0.3333)

     Example (preds is float tensor):
         >>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
-        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
-        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
         >>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
         >>> metric(preds, target)
         tensor(0.3333)
@@ -204,8 +204,9 @@ class MatthewsCorrCoef:
         the specific details of each argument influence and examples.

     Legacy Example:
-        >>> target = torch.tensor([1, 1, 0, 0])
-        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> from torch import tensor
+        >>> target = tensor([1, 1, 0, 0])
+        >>> preds = tensor([0, 1, 0, 0])
         >>> matthews_corrcoef = MatthewsCorrCoef(task='binary')
         >>> matthews_corrcoef(preds, target)
         tensor(0.5774)
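The binary value tensor(0.5774) is 2/sqrt(12), i.e. 1/sqrt(3): the example yields TP=1, TN=2, FP=0, FN=1, and MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)):

    import math

    tp, tn, fp, fn = 1, 2, 0, 1
    mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    print(round(mcc, 4))  # 0.5774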
diff --git a/src/torchmetrics/classification/precision_recall.py b/src/torchmetrics/classification/precision_recall.py
index 146822bfd38..4fa555c3467 100644
--- a/src/torchmetrics/classification/precision_recall.py
+++ b/src/torchmetrics/classification/precision_recall.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Optional

-import torch
 from torch import Tensor
 from typing_extensions import Literal
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelJaccardIndex - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelJaccardIndex(num_labels=3) >>> metric(preds, target) tensor(0.5000) Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelJaccardIndex - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelJaccardIndex(num_labels=3) >>> metric(preds, target) tensor(0.5000) @@ -260,8 +260,9 @@ class JaccardIndex: the specific details of each argument influence and examples. Legacy Example: - >>> target = torch.randint(0, 2, (10, 25, 25)) - >>> pred = torch.tensor(target) + >>> from torch import randint, tensor + >>> target = randint(0, 2, (10, 25, 25)) + >>> pred = tensor(target) >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15] >>> jaccard = JaccardIndex(task="multiclass", num_classes=2) >>> jaccard(pred, target) diff --git a/src/torchmetrics/classification/matthews_corrcoef.py b/src/torchmetrics/classification/matthews_corrcoef.py index 5b0a56f3ae3..7883916193b 100644 --- a/src/torchmetrics/classification/matthews_corrcoef.py +++ b/src/torchmetrics/classification/matthews_corrcoef.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, Optional -import torch from torch import Tensor from typing_extensions import Literal @@ -44,17 +43,18 @@ class BinaryMatthewsCorrCoef(BinaryConfusionMatrix): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import BinaryMatthewsCorrCoef - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> metric = BinaryMatthewsCorrCoef() >>> metric(preds, target) tensor(0.5774) Example (preds is float tensor): >>> from torchmetrics.classification import BinaryMatthewsCorrCoef - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0.35, 0.85, 0.48, 0.01]) >>> metric = BinaryMatthewsCorrCoef() >>> metric(preds, target) tensor(0.5774) @@ -99,22 +99,21 @@ class MulticlassMatthewsCorrCoef(MulticlassConfusionMatrix): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (pred is integer tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassMatthewsCorrCoef - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> metric = MulticlassMatthewsCorrCoef(num_classes=3) >>> metric(preds, target) tensor(0.7000) Example (pred is float tensor): >>> from torchmetrics.classification import MulticlassMatthewsCorrCoef - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... 
[0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> metric = MulticlassMatthewsCorrCoef(num_classes=3) >>> metric(preds, target) tensor(0.7000) @@ -160,17 +159,18 @@ class MultilabelMatthewsCorrCoef(MultilabelConfusionMatrix): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelMatthewsCorrCoef - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelMatthewsCorrCoef(num_labels=3) >>> metric(preds, target) tensor(0.3333) Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelMatthewsCorrCoef - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelMatthewsCorrCoef(num_labels=3) >>> metric(preds, target) tensor(0.3333) @@ -204,8 +204,9 @@ class MatthewsCorrCoef: the specific details of each argument influence and examples. Legacy Example: - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> from torch import tensor + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> matthews_corrcoef = MatthewsCorrCoef(task='binary') >>> matthews_corrcoef(preds, target) tensor(0.5774) diff --git a/src/torchmetrics/classification/precision_recall.py b/src/torchmetrics/classification/precision_recall.py index 146822bfd38..4fa555c3467 100644 --- a/src/torchmetrics/classification/precision_recall.py +++ b/src/torchmetrics/classification/precision_recall.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, Optional -import torch from torch import Tensor from typing_extensions import Literal @@ -59,30 +58,27 @@ class BinaryPrecision(BinaryStatScores): is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import BinaryPrecision - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> metric = BinaryPrecision() >>> metric(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.classification import BinaryPrecision - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> metric = BinaryPrecision() >>> metric(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.classification import BinaryPrecision - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... 
[[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = BinaryPrecision(multidim_average='samplewise') >>> metric(preds, target) tensor([0.4000, 0.0000]) @@ -154,9 +150,10 @@ class MulticlassPrecision(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassPrecision - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> metric = MulticlassPrecision(num_classes=3) >>> metric(preds, target) tensor(0.8333) @@ -166,13 +163,11 @@ class MulticlassPrecision(MulticlassStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MulticlassPrecision - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> metric = MulticlassPrecision(num_classes=3) >>> metric(preds, target) tensor(0.8333) @@ -182,8 +177,8 @@ class MulticlassPrecision(MulticlassStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MulticlassPrecision - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> metric = MulticlassPrecision(num_classes=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.3889, 0.2778]) @@ -258,9 +253,10 @@ class MultilabelPrecision(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelPrecision - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelPrecision(num_labels=3) >>> metric(preds, target) tensor(0.5000) @@ -270,8 +266,8 @@ class MultilabelPrecision(MultilabelStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelPrecision - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelPrecision(num_labels=3) >>> metric(preds, target) tensor(0.5000) @@ -281,13 +277,9 @@ class MultilabelPrecision(MultilabelStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MultilabelPrecision - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... 
[[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = MultilabelPrecision(num_labels=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.3333, 0.0000]) @@ -344,30 +336,27 @@ class BinaryRecall(BinaryStatScores): is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import BinaryRecall - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> metric = BinaryRecall() >>> metric(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.classification import BinaryRecall - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> metric = BinaryRecall() >>> metric(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.classification import BinaryRecall - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = BinaryRecall(multidim_average='samplewise') >>> metric(preds, target) tensor([0.6667, 0.0000]) @@ -439,9 +428,10 @@ class MulticlassRecall(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassRecall - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> metric = MulticlassRecall(num_classes=3) >>> metric(preds, target) tensor(0.8333) @@ -451,13 +441,11 @@ class MulticlassRecall(MulticlassStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MulticlassRecall - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> metric = MulticlassRecall(num_classes=3) >>> metric(preds, target) tensor(0.8333) @@ -467,8 +455,8 @@ class MulticlassRecall(MulticlassStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MulticlassRecall - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> metric = MulticlassRecall(num_classes=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.5000, 0.2778]) @@ -543,9 +531,10 @@ class MultilabelRecall(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelRecall - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelRecall(num_labels=3) >>> metric(preds, target) tensor(0.6667) @@ -555,8 +544,8 @@ class MultilabelRecall(MultilabelStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelRecall - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelRecall(num_labels=3) >>> metric(preds, target) tensor(0.6667) @@ -566,13 +555,9 @@ class MultilabelRecall(MultilabelStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MultilabelRecall - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = MultilabelRecall(num_labels=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.6667, 0.0000]) @@ -606,9 +591,9 @@ class Precision: each argument influence and examples. Legacy Example: - >>> import torch - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> precision = Precision(task="multiclass", average='macro', num_classes=3) >>> precision(preds, target) tensor(0.1667) @@ -660,9 +645,9 @@ class Recall: each argument influence and examples. 
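Note: with average='macro' both scores are plain per-class averages over the confusion matrix: precision divides the diagonal by column sums (predicted counts), recall by row sums (true counts). A hand-check reproducing the Precision value above and the Recall value below (an illustrative sketch, not part of the patch):

import torch
from torch import tensor

preds = tensor([2, 0, 2, 1])
target = tensor([1, 1, 2, 0])
confmat = torch.zeros(3, 3)                 # rows: true class, cols: predicted class
for t, p in zip(target.tolist(), preds.tolist()):
    confmat[t, p] += 1
tp = confmat.diag()
print((tp / confmat.sum(0)).mean())         # tensor(0.1667), macro precision
print((tp / confmat.sum(1)).mean())         # tensor(0.3333), macro recall
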
Legacy Example: - >>> import torch - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> recall = Recall(task="multiclass", average='macro', num_classes=3) >>> recall(preds, target) tensor(0.3333) diff --git a/src/torchmetrics/classification/recall_at_fixed_precision.py b/src/torchmetrics/classification/recall_at_fixed_precision.py index 3121663653c..4a1cd1f5efc 100644 --- a/src/torchmetrics/classification/recall_at_fixed_precision.py +++ b/src/torchmetrics/classification/recall_at_fixed_precision.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, List, Optional, Tuple, Union -import torch from torch import Tensor from typing_extensions import Literal @@ -79,9 +78,10 @@ class BinaryRecallAtFixedPrecision(BinaryPrecisionRecallCurve): - threshold: an scalar tensor with the corresponding threshold level Example: + >>> from torch import tensor >>> from torchmetrics.classification import BinaryRecallAtFixedPrecision - >>> preds = torch.tensor([0, 0.5, 0.7, 0.8]) - >>> target = torch.tensor([0, 1, 1, 0]) + >>> preds = tensor([0, 0.5, 0.7, 0.8]) + >>> target = tensor([0, 1, 1, 0]) >>> metric = BinaryRecallAtFixedPrecision(min_precision=0.5, thresholds=None) >>> metric(preds, target) (tensor(1.), tensor(0.5000)) @@ -161,12 +161,13 @@ class MulticlassRecallAtFixedPrecision(MulticlassPrecisionRecallCurve): - thresholds: an 1d tensor of size (n_classes, ) with the corresponding threshold level per class Example: + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassRecallAtFixedPrecision - >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... [0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = torch.tensor([0, 1, 3, 2]) + >>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = tensor([0, 1, 3, 2]) >>> metric = MulticlassRecallAtFixedPrecision(num_classes=5, min_precision=0.5, thresholds=None) >>> metric(preds, target) (tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06])) @@ -251,15 +252,16 @@ class MultilabelRecallAtFixedPrecision(MultilabelPrecisionRecallCurve): - thresholds: an 1d tensor of size (n_classes, ) with the corresponding threshold level per class Example: + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelRecallAtFixedPrecision - >>> preds = torch.tensor([[0.75, 0.05, 0.35], - ... [0.45, 0.75, 0.05], - ... [0.05, 0.55, 0.75], - ... [0.05, 0.65, 0.05]]) - >>> target = torch.tensor([[1, 0, 1], - ... [0, 0, 0], - ... [0, 1, 1], - ... [1, 1, 1]]) + >>> preds = tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) >>> metric = MultilabelRecallAtFixedPrecision(num_labels=3, min_precision=0.5, thresholds=None) >>> metric(preds, target) (tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500])) diff --git a/src/torchmetrics/classification/roc.py b/src/torchmetrics/classification/roc.py index ca9adf24472..f1f0078ea05 100644 --- a/src/torchmetrics/classification/roc.py +++ b/src/torchmetrics/classification/roc.py @@ -13,7 +13,6 @@ # limitations under the License. 
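Note: the recall-at-fixed-precision metrics above return the best achievable recall subject to precision >= min_precision, together with the threshold realizing it (1e6 is the sentinel when no threshold qualifies). A rough equivalent of the BinaryRecallAtFixedPrecision doctest; this sketches only the selection rule, the actual implementation operates on the full precision-recall curve:

from torch import tensor

preds = tensor([0.0, 0.5, 0.7, 0.8])
target = tensor([0, 1, 1, 0])
best_recall, best_thr = tensor(0.0), tensor(1e6)     # sentinel fallback
for thr in preds.unique():                           # candidate thresholds, ascending
    pos = preds >= thr
    tp = (pos & (target == 1)).sum().float()
    fp = (pos & (target == 0)).sum().float()
    precision, recall = tp / (tp + fp), tp / (target == 1).sum()
    if precision >= 0.5 and recall >= best_recall:   # keep the highest such threshold
        best_recall, best_thr = recall, thr
print(best_recall, best_thr)                         # tensor(1.) tensor(0.5000)
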
from typing import Any, List, Optional, Tuple, Union -import torch from torch import Tensor from typing_extensions import Literal @@ -79,9 +78,10 @@ class BinaryROC(BinaryPrecisionRecallCurve): - thresholds: an 1d tensor of size (n_thresholds, ) with decreasing threshold values Example: + >>> from torch import tensor >>> from torchmetrics.classification import BinaryROC - >>> preds = torch.tensor([0, 0.5, 0.7, 0.8]) - >>> target = torch.tensor([0, 1, 1, 0]) + >>> preds = tensor([0, 0.5, 0.7, 0.8]) + >>> target = tensor([0, 1, 1, 0]) >>> metric = BinaryROC(thresholds=None) >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE (tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]), @@ -160,12 +160,13 @@ class MulticlassROC(MulticlassPrecisionRecallCurve): then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes. Example: + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassROC - >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05, 0.05], - ... [0.05, 0.05, 0.05, 0.75, 0.05]]) - >>> target = torch.tensor([0, 1, 3, 2]) + >>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = tensor([0, 1, 3, 2]) >>> metric = MulticlassROC(num_classes=5, thresholds=None) >>> fpr, tpr, thresholds = metric(preds, target) >>> fpr # doctest: +NORMALIZE_WHITESPACE @@ -257,15 +258,16 @@ class MultilabelROC(MultilabelPrecisionRecallCurve): then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels. Example: + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelROC - >>> preds = torch.tensor([[0.75, 0.05, 0.35], - ... [0.45, 0.75, 0.05], - ... [0.05, 0.55, 0.75], - ... [0.05, 0.65, 0.05]]) - >>> target = torch.tensor([[1, 0, 1], - ... [0, 0, 0], - ... [0, 1, 1], - ... [1, 1, 1]]) + >>> preds = tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) >>> metric = MultilabelROC(num_labels=3, thresholds=None) >>> fpr, tpr, thresholds = metric(preds, target) >>> fpr # doctest: +NORMALIZE_WHITESPACE @@ -313,8 +315,9 @@ class ROC: influence and examples. Legacy Example: - >>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0]) - >>> target = torch.tensor([0, 1, 1, 1]) + >>> from torch import tensor + >>> pred = tensor([0.0, 1.0, 2.0, 3.0]) + >>> target = tensor([0, 1, 1, 1]) >>> roc = ROC(task="binary") >>> fpr, tpr, thresholds = roc(pred, target) >>> fpr @@ -324,11 +327,11 @@ class ROC: >>> thresholds tensor([1.0000, 0.9526, 0.8808, 0.7311, 0.5000]) - >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05], - ... [0.05, 0.75, 0.05, 0.05], - ... [0.05, 0.05, 0.75, 0.05], - ... [0.05, 0.05, 0.05, 0.75]]) - >>> target = torch.tensor([0, 1, 3, 2]) + >>> pred = tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... [0.05, 0.05, 0.05, 0.75]]) + >>> target = tensor([0, 1, 3, 2]) >>> roc = ROC(task="multiclass", num_classes=4) >>> fpr, tpr, thresholds = roc(pred, target) >>> fpr @@ -341,11 +344,11 @@ class ROC: tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500])] - >>> pred = torch.tensor([[0.8191, 0.3680, 0.1138], - ... [0.3584, 0.7576, 0.1183], - ... [0.2286, 0.3468, 0.1338], - ... 
[0.8603, 0.0745, 0.1837]]) - >>> target = torch.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> pred = tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) >>> roc = ROC(task='multilabel', num_labels=3) >>> fpr, tpr, thresholds = roc(pred, target) >>> fpr diff --git a/src/torchmetrics/classification/specificity.py b/src/torchmetrics/classification/specificity.py index 2a572e153f1..440d8964448 100644 --- a/src/torchmetrics/classification/specificity.py +++ b/src/torchmetrics/classification/specificity.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, Optional -import torch from torch import Tensor from typing_extensions import Literal @@ -59,30 +58,27 @@ class BinarySpecificity(BinaryStatScores): is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import BinarySpecificity - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> metric = BinarySpecificity() >>> metric(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.classification import BinarySpecificity - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> metric = BinarySpecificity() >>> metric(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.classification import BinarySpecificity - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = BinarySpecificity(multidim_average='samplewise') >>> metric(preds, target) tensor([0.0000, 0.3333]) @@ -149,9 +145,10 @@ class MulticlassSpecificity(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassSpecificity - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> metric = MulticlassSpecificity(num_classes=3) >>> metric(preds, target) tensor(0.8889) @@ -161,13 +158,11 @@ class MulticlassSpecificity(MulticlassStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MulticlassSpecificity - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> metric = MulticlassSpecificity(num_classes=3) >>> metric(preds, target) tensor(0.8889) @@ -177,8 +172,8 @@ class MulticlassSpecificity(MulticlassStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MulticlassSpecificity - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> metric = MulticlassSpecificity(num_classes=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.7500, 0.6556]) @@ -243,9 +238,10 @@ class MultilabelSpecificity(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelSpecificity - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelSpecificity(num_labels=3) >>> metric(preds, target) tensor(0.6667) @@ -255,8 +251,8 @@ class MultilabelSpecificity(MultilabelStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelSpecificity - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelSpecificity(num_labels=3) >>> metric(preds, target) tensor(0.6667) @@ -266,13 +262,9 @@ class MultilabelSpecificity(MultilabelStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MultilabelSpecificity - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = MultilabelSpecificity(num_labels=3, multidim_average='samplewise') >>> metric(preds, target) tensor([0.0000, 0.3333]) @@ -301,8 +293,9 @@ class Specificity: details of each argument influence and examples. Legacy Example: - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> specificity = Specificity(task="multiclass", average='macro', num_classes=3) >>> specificity(preds, target) tensor(0.6111) diff --git a/src/torchmetrics/classification/stat_scores.py b/src/torchmetrics/classification/stat_scores.py index 653ba23b125..7aa311bdbfa 100644 --- a/src/torchmetrics/classification/stat_scores.py +++ b/src/torchmetrics/classification/stat_scores.py @@ -120,30 +120,27 @@ class BinaryStatScores(_AbstractStatScores): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
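Note: the five numbers returned in the examples below are, in order, [tp, fp, tn, fn, support], with support = tp + fn. Hand-check of the first doctest (a sketch, not part of the patch):

from torch import tensor

target = tensor([0, 1, 0, 1, 0, 1])
preds = tensor([0, 0, 1, 1, 0, 1])
tp = int(((preds == 1) & (target == 1)).sum())
fp = int(((preds == 1) & (target == 0)).sum())
tn = int(((preds == 0) & (target == 0)).sum())
fn = int(((preds == 0) & (target == 1)).sum())
print(tensor([tp, fp, tn, fn, tp + fn]))   # tensor([2, 1, 2, 1, 3])
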
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import BinaryStatScores - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> metric = BinaryStatScores() >>> metric(preds, target) tensor([2, 1, 2, 1, 3]) Example (preds is float tensor): >>> from torchmetrics.classification import BinaryStatScores - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> metric = BinaryStatScores() >>> metric(preds, target) tensor([2, 1, 2, 1, 3]) Example (multidim tensors): >>> from torchmetrics.classification import BinaryStatScores - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = BinaryStatScores(multidim_average='samplewise') >>> metric(preds, target) tensor([[2, 3, 0, 1, 3], @@ -238,9 +235,10 @@ class MulticlassStatScores(_AbstractStatScores): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MulticlassStatScores - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> metric = MulticlassStatScores(num_classes=3, average='micro') >>> metric(preds, target) tensor([3, 1, 7, 1, 4]) @@ -252,13 +250,11 @@ class MulticlassStatScores(_AbstractStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MulticlassStatScores - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> metric = MulticlassStatScores(num_classes=3, average='micro') >>> metric(preds, target) tensor([3, 1, 7, 1, 4]) @@ -270,8 +266,8 @@ class MulticlassStatScores(_AbstractStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MulticlassStatScores - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> metric = MulticlassStatScores(num_classes=3, multidim_average="samplewise", average='micro') >>> metric(preds, target) tensor([[3, 3, 9, 3, 6], @@ -383,9 +379,10 @@ class MultilabelStatScores(_AbstractStatScores): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.classification import MultilabelStatScores - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> metric = MultilabelStatScores(num_labels=3, average='micro') >>> metric(preds, target) tensor([2, 1, 2, 1, 3]) @@ -397,8 +394,8 @@ class MultilabelStatScores(_AbstractStatScores): Example (preds is float tensor): >>> from torchmetrics.classification import MultilabelStatScores - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> metric = MultilabelStatScores(num_labels=3, average='micro') >>> metric(preds, target) tensor([2, 1, 2, 1, 3]) @@ -410,13 +407,9 @@ class MultilabelStatScores(_AbstractStatScores): Example (multidim tensors): >>> from torchmetrics.classification import MultilabelStatScores - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> metric = MultilabelStatScores(num_labels=3, multidim_average='samplewise', average='micro') >>> metric(preds, target) tensor([[2, 3, 0, 1, 3], @@ -483,8 +476,9 @@ class StatScores: details of each argument influence and examples. Legacy Example: - >>> preds = torch.tensor([1, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([1, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> stat_scores = StatScores(task="multiclass", num_classes=3, average='micro') >>> stat_scores(preds, target) tensor([2, 2, 6, 2, 4]) diff --git a/src/torchmetrics/collections.py b/src/torchmetrics/collections.py index 8e647d3f220..3cabfa7cdee 100644 --- a/src/torchmetrics/collections.py +++ b/src/torchmetrics/collections.py @@ -80,12 +80,12 @@ class name as key for the output dict. If ``postfix`` is set and it is not a string. Example (input as list): - >>> import torch + >>> from torch import tensor >>> from pprint import pprint >>> from torchmetrics import MetricCollection, MeanSquaredError >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall - >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2]) - >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2]) + >>> target = tensor([0, 2, 0, 2, 0, 1, 0, 2]) + >>> preds = tensor([2, 1, 2, 0, 1, 2, 2, 2]) >>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'), ... MulticlassPrecision(num_classes=3, average='macro'), ... MulticlassRecall(num_classes=3, average='macro')]) diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py index 76715f4764b..711d6adad7c 100644 --- a/src/torchmetrics/detection/mean_ap.py +++ b/src/torchmetrics/detection/mean_ap.py @@ -312,19 +312,19 @@ class MeanAveragePrecision(Metric): If any score is not type float and of length 1 Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics.detection.mean_ap import MeanAveragePrecision >>> preds = [ ... 
dict( - ... boxes=torch.tensor([[258.0, 41.0, 606.0, 285.0]]), - ... scores=torch.tensor([0.536]), - ... labels=torch.tensor([0]), + ... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]), + ... scores=tensor([0.536]), + ... labels=tensor([0]), ... ) ... ] >>> target = [ ... dict( - ... boxes=torch.tensor([[214.0, 41.0, 562.0, 285.0]]), - ... labels=torch.tensor([0]), + ... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]), + ... labels=tensor([0]), ... ) ... ] >>> metric = MeanAveragePrecision() diff --git a/src/torchmetrics/functional/audio/pesq.py b/src/torchmetrics/functional/audio/pesq.py index a74d8d4d2d6..fe37954c87f 100644 --- a/src/torchmetrics/functional/audio/pesq.py +++ b/src/torchmetrics/functional/audio/pesq.py @@ -71,11 +71,11 @@ def perceptual_evaluation_speech_quality( If ``preds`` and ``target`` do not have the same shape Example: + >>> from torch import randn >>> from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality - >>> import torch >>> g = torch.manual_seed(1) - >>> preds = torch.randn(8000) - >>> target = torch.randn(8000) + >>> preds = randn(8000) + >>> target = randn(8000) >>> perceptual_evaluation_speech_quality(preds, target, 8000, 'nb') tensor(2.2076) >>> perceptual_evaluation_speech_quality(preds, target, 16000, 'wb') diff --git a/src/torchmetrics/functional/audio/sdr.py b/src/torchmetrics/functional/audio/sdr.py index c8b6e274b65..d6eeb495447 100644 --- a/src/torchmetrics/functional/audio/sdr.py +++ b/src/torchmetrics/functional/audio/sdr.py @@ -40,9 +40,9 @@ def _symmetric_toeplitz(vector: Tensor) -> Tensor: vector: shape [..., L] Example: + >>> from torch import tensor >>> from torchmetrics.functional.audio.sdr import _symmetric_toeplitz - >>> import torch - >>> v = torch.tensor([0, 1, 2, 3, 4]) + >>> v = tensor([0, 1, 2, 3, 4]) >>> _symmetric_toeplitz(v) tensor([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3], @@ -130,8 +130,8 @@ def signal_distortion_ratio( If ``preds`` and ``target`` does not have the same shape Example: - >>> from torchmetrics.functional.audio import signal_distortion_ratio >>> import torch + >>> from torchmetrics.functional.audio import signal_distortion_ratio >>> g = torch.manual_seed(1) >>> preds = torch.randn(8000) >>> target = torch.randn(8000) diff --git a/src/torchmetrics/functional/audio/stoi.py b/src/torchmetrics/functional/audio/stoi.py index afd1e7caa09..c7151469337 100644 --- a/src/torchmetrics/functional/audio/stoi.py +++ b/src/torchmetrics/functional/audio/stoi.py @@ -61,8 +61,8 @@ def short_time_objective_intelligibility( If ``preds`` and ``target`` does not have the same shape Example: - >>> from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility >>> import torch + >>> from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility >>> g = torch.manual_seed(1) >>> preds = torch.randn(8000) >>> target = torch.randn(8000) diff --git a/src/torchmetrics/functional/classification/accuracy.py b/src/torchmetrics/functional/classification/accuracy.py index 445d50f2953..0589e2e6077 100644 --- a/src/torchmetrics/functional/classification/accuracy.py +++ b/src/torchmetrics/functional/classification/accuracy.py @@ -130,28 +130,25 @@ def binary_accuracy( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. 
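Note: with multidim_average='samplewise', everything after the leading batch dimension is flattened and the metric is reduced once per sample. The multidim doctest below can be reproduced by hand (a sketch, not part of the patch):

from torch import tensor

target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
                [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
hard = (preds > 0.5).long()                         # binarize the probabilities
print((hard == target).flatten(1).float().mean(1))  # tensor([0.3333, 0.1667])
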
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_accuracy - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_accuracy(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_accuracy - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_accuracy(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_accuracy - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_accuracy(preds, target, multidim_average='samplewise') tensor([0.3333, 0.1667]) """ @@ -232,9 +229,10 @@ def multiclass_accuracy( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_accuracy - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_accuracy(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_accuracy(preds, target, num_classes=3, average=None) @@ -242,13 +240,11 @@ def multiclass_accuracy( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_accuracy - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_accuracy(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_accuracy(preds, target, num_classes=3, average=None) @@ -256,8 +252,8 @@ def multiclass_accuracy( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_accuracy - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.5000, 0.2778]) >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -341,9 +337,10 @@ def multilabel_accuracy( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_accuracy - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_accuracy(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_accuracy(preds, target, num_labels=3, average=None) @@ -351,8 +348,8 @@ def multilabel_accuracy( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_accuracy - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_accuracy(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_accuracy(preds, target, num_labels=3, average=None) @@ -360,13 +357,9 @@ def multilabel_accuracy( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_accuracy - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.3333, 0.1667]) >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -407,14 +400,14 @@ def accuracy( each argument influence and examples. 
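Note: top_k=2 in the Legacy Example below counts a sample as correct whenever the target class is among the two highest scores. An illustrative hand-check (not part of the patch):

from torch import tensor

target = tensor([0, 1, 2])
preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
top2 = preds.topk(2, dim=1).indices             # two best classes per row
hit = (top2 == target.unsqueeze(1)).any(dim=1)  # [True, False, True]
print(hit.float().mean())                       # tensor(0.6667)
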
Legacy Example: - >>> import torch - >>> target = torch.tensor([0, 1, 2, 3]) - >>> preds = torch.tensor([0, 2, 1, 3]) + >>> from torch import tensor + >>> target = tensor([0, 1, 2, 3]) + >>> preds = tensor([0, 2, 1, 3]) >>> accuracy(preds, target, task="multiclass", num_classes=4) tensor(0.5000) - >>> target = torch.tensor([0, 1, 2]) - >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> target = tensor([0, 1, 2]) + >>> preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) >>> accuracy(preds, target, task="multiclass", num_classes=3, top_k=2) tensor(0.6667) """ diff --git a/src/torchmetrics/functional/classification/cohen_kappa.py b/src/torchmetrics/functional/classification/cohen_kappa.py index b4d9c1217a0..2adb3e0ba33 100644 --- a/src/torchmetrics/functional/classification/cohen_kappa.py +++ b/src/torchmetrics/functional/classification/cohen_kappa.py @@ -117,16 +117,17 @@ class labels. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_cohen_kappa - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> binary_cohen_kappa(preds, target) tensor(0.5000) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_cohen_kappa - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0.35, 0.85, 0.48, 0.01]) >>> binary_cohen_kappa(preds, target) tensor(0.5000) """ @@ -201,21 +202,20 @@ class labels. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (pred is integer tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_cohen_kappa - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_cohen_kappa(preds, target, num_classes=3) tensor(0.6364) Example (pred is float tensor): >>> from torchmetrics.functional.classification import multiclass_cohen_kappa - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> multiclass_cohen_kappa(preds, target, num_classes=3) tensor(0.6364) """ @@ -253,8 +253,9 @@ class labels. each argument influence and examples. 
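Note: Cohen's kappa below is (p_o - p_e) / (1 - p_e), i.e. observed agreement corrected by the agreement expected from the class marginals. Hand-check of the Legacy Example value (a sketch, not part of the patch):

from torch import tensor

target = tensor([1, 1, 0, 0])
preds = tensor([0, 1, 0, 0])
p_o = (preds == target).float().mean()          # observed agreement: 0.75
p_e = sum((preds == c).float().mean() * (target == c).float().mean()
          for c in (0, 1))                      # chance agreement: 0.50
print((p_o - p_e) / (1 - p_e))                  # tensor(0.5000)
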
Legacy Example: - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> from torch import tensor + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> cohen_kappa(preds, target, task="multiclass", num_classes=2) tensor(0.5000) """ diff --git a/src/torchmetrics/functional/classification/confusion_matrix.py b/src/torchmetrics/functional/classification/confusion_matrix.py index 3d5b76e1ad5..48089edb5d7 100644 --- a/src/torchmetrics/functional/classification/confusion_matrix.py +++ b/src/torchmetrics/functional/classification/confusion_matrix.py @@ -195,17 +195,18 @@ def binary_confusion_matrix( A ``[2, 2]`` tensor Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_confusion_matrix - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> binary_confusion_matrix(preds, target) tensor([[2, 0], [1, 1]]) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_confusion_matrix - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0.35, 0.85, 0.48, 0.01]) >>> binary_confusion_matrix(preds, target) tensor([[2, 0], [1, 1]]) @@ -379,9 +380,10 @@ def multiclass_confusion_matrix( A ``[num_classes, num_classes]`` tensor Example (pred is integer tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_confusion_matrix - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_confusion_matrix(preds, target, num_classes=3) tensor([[1, 1, 0], [0, 1, 0], @@ -389,13 +391,11 @@ def multiclass_confusion_matrix( Example (pred is float tensor): >>> from torchmetrics.functional.classification import multiclass_confusion_matrix - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_confusion_matrix(preds, target, num_classes=3) tensor([[1, 1, 0], [0, 1, 0], @@ -566,9 +566,10 @@ def multilabel_confusion_matrix( A ``[num_labels, 2, 2]`` tensor Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_confusion_matrix - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_confusion_matrix(preds, target, num_labels=3) tensor([[[1, 0], [0, 1]], [[1, 0], [1, 0]], @@ -576,8 +577,8 @@ def multilabel_confusion_matrix( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_confusion_matrix - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_confusion_matrix(preds, target, num_labels=3) tensor([[[1, 0], [0, 1]], [[1, 0], [1, 0]], @@ -610,24 +611,25 @@ def confusion_matrix( the specific details of each argument influence and examples. Legacy Example: + >>> from torch import tensor >>> from torchmetrics import ConfusionMatrix - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> confmat = ConfusionMatrix(task="binary") >>> confmat(preds, target) tensor([[2, 0], [1, 1]]) - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3) >>> confmat(preds, target) tensor([[1, 1, 0], [0, 1, 0], [0, 0, 1]]) - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3) >>> confmat(preds, target) tensor([[[1, 0], [0, 1]], diff --git a/src/torchmetrics/functional/classification/exact_match.py b/src/torchmetrics/functional/classification/exact_match.py index 6993c3b5ed7..4cf7359e88d 100644 --- a/src/torchmetrics/functional/classification/exact_match.py +++ b/src/torchmetrics/functional/classification/exact_match.py @@ -92,16 +92,17 @@ def multiclass_exact_match( - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` Example (multidim tensors): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_exact_match - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='global') tensor(0.5000) Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_exact_match - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = 
tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='samplewise') tensor([1., 0.]) """ @@ -173,28 +174,25 @@ def multilabel_exact_match( - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_exact_match - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_exact_match(preds, target, num_labels=3) tensor(0.5000) Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_exact_match - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_exact_match(preds, target, num_labels=3) tensor(0.5000) Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_exact_match - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_exact_match(preds, target, num_labels=3, multidim_average='samplewise') tensor([0., 0.]) """ @@ -226,13 +224,14 @@ def exact_match( :func:`multiclass_exact_match` and :func:`multilabel_exact_match` for the specific details of each argument influence and examples. Legacy Example: - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> from torch import tensor + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='global') tensor(0.5000) - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='samplewise') tensor([1., 0.]) """ diff --git a/src/torchmetrics/functional/classification/f_beta.py b/src/torchmetrics/functional/classification/f_beta.py index 6c7aebba8bc..ea2769999ff 100644 --- a/src/torchmetrics/functional/classification/f_beta.py +++ b/src/torchmetrics/functional/classification/f_beta.py @@ -120,28 +120,25 @@ def binary_fbeta_score( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. 
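Note: F-beta below is (1 + beta^2) * P * R / (beta^2 * P + R); beta=1 recovers the F1 score defined further down in this file. Hand-check of the first binary doctest with beta=2.0 (a sketch, not part of the patch):

from torch import tensor

target = tensor([0, 1, 0, 1, 0, 1])
preds = tensor([0, 0, 1, 1, 0, 1])
tp = ((preds == 1) & (target == 1)).sum().float()   # 2
fp = ((preds == 1) & (target == 0)).sum().float()   # 1
fn = ((preds == 0) & (target == 1)).sum().float()   # 1
p, r, beta = tp / (tp + fp), tp / (tp + fn), 2.0
print((1 + beta**2) * p * r / (beta**2 * p + r))    # tensor(0.6667)
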
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_fbeta_score - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_fbeta_score(preds, target, beta=2.0) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_fbeta_score - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_fbeta_score(preds, target, beta=2.0) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_fbeta_score - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_fbeta_score(preds, target, beta=2.0, multidim_average='samplewise') tensor([0.5882, 0.0000]) """ @@ -234,9 +231,10 @@ def multiclass_fbeta_score( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_fbeta_score - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3) tensor(0.7963) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None) @@ -244,13 +242,11 @@ def multiclass_fbeta_score( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_fbeta_score - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3) tensor(0.7963) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None) @@ -258,8 +254,8 @@ def multiclass_fbeta_score( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_fbeta_score - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise') tensor([0.4697, 0.2706]) >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise', average=None) @@ -356,9 +352,10 @@ def multilabel_fbeta_score( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_fbeta_score - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3) tensor(0.6111) >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None) @@ -366,8 +363,8 @@ def multilabel_fbeta_score( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_fbeta_score - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3) tensor(0.6111) >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None) @@ -375,13 +372,9 @@ def multilabel_fbeta_score( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_fbeta_score - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise') tensor([0.5556, 0.0000]) >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise', average=None) @@ -440,28 +433,25 @@ def binary_f1_score( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. 
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_f1_score - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_f1_score(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_f1_score - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_f1_score(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_f1_score - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_f1_score(preds, target, multidim_average='samplewise') tensor([0.5000, 0.0000]) """ @@ -541,9 +531,10 @@ def multiclass_f1_score( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_f1_score - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_f1_score(preds, target, num_classes=3) tensor(0.7778) >>> multiclass_f1_score(preds, target, num_classes=3, average=None) @@ -551,13 +542,11 @@ def multiclass_f1_score( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_f1_score - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_f1_score(preds, target, num_classes=3) tensor(0.7778) >>> multiclass_f1_score(preds, target, num_classes=3, average=None) @@ -565,8 +554,8 @@ def multiclass_f1_score( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_f1_score - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.4333, 0.2667]) >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -650,9 +639,10 @@ def multilabel_f1_score( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_f1_score - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_f1_score(preds, target, num_labels=3) tensor(0.5556) >>> multilabel_f1_score(preds, target, num_labels=3, average=None) @@ -660,8 +650,8 @@ def multilabel_f1_score( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_f1_score - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_f1_score(preds, target, num_labels=3) tensor(0.5556) >>> multilabel_f1_score(preds, target, num_labels=3, average=None) @@ -669,13 +659,9 @@ def multilabel_f1_score( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_f1_score - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.4444, 0.0000]) >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -721,8 +707,9 @@ def fbeta_score( details of each argument influence and examples. Legacy Example: - >>> target = torch.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = torch.tensor([0, 2, 1, 0, 0, 1]) + >>> from torch import tensor + >>> target = tensor([0, 1, 2, 0, 1, 2]) + >>> preds = tensor([0, 2, 1, 0, 0, 1]) >>> fbeta_score(preds, target, task="multiclass", num_classes=3, beta=0.5) tensor(0.3333) """ @@ -769,8 +756,9 @@ def f1_score( details of each argument influence and examples. 
Legacy Example: - >>> target = torch.tensor([0, 1, 2, 0, 1, 2]) - >>> preds = torch.tensor([0, 2, 1, 0, 0, 1]) + >>> from torch import tensor + >>> target = tensor([0, 1, 2, 0, 1, 2]) + >>> preds = tensor([0, 2, 1, 0, 0, 1]) >>> f1_score(preds, target, task="multiclass", num_classes=3) tensor(0.3333) """ diff --git a/src/torchmetrics/functional/classification/hamming.py b/src/torchmetrics/functional/classification/hamming.py index 84da16735b1..438c2e6390f 100644 --- a/src/torchmetrics/functional/classification/hamming.py +++ b/src/torchmetrics/functional/classification/hamming.py @@ -133,28 +133,25 @@ def binary_hamming_distance( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_hamming_distance - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_hamming_distance(preds, target) tensor(0.3333) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_hamming_distance - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_hamming_distance(preds, target) tensor(0.3333) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_hamming_distance - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_hamming_distance(preds, target, multidim_average='samplewise') tensor([0.6667, 0.8333]) """ @@ -236,9 +233,10 @@ def multiclass_hamming_distance( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_hamming_distance - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_hamming_distance(preds, target, num_classes=3) tensor(0.1667) >>> multiclass_hamming_distance(preds, target, num_classes=3, average=None) @@ -246,13 +244,11 @@ def multiclass_hamming_distance( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_hamming_distance - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_hamming_distance(preds, target, num_classes=3) tensor(0.1667) >>> multiclass_hamming_distance(preds, target, num_classes=3, average=None) @@ -260,8 +256,8 @@ def multiclass_hamming_distance( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_hamming_distance - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_hamming_distance(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.5000, 0.7222]) >>> multiclass_hamming_distance(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -346,9 +342,10 @@ def multilabel_hamming_distance( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_hamming_distance - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_hamming_distance(preds, target, num_labels=3) tensor(0.3333) >>> multilabel_hamming_distance(preds, target, num_labels=3, average=None) @@ -356,8 +353,8 @@ def multilabel_hamming_distance( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_hamming_distance - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_hamming_distance(preds, target, num_labels=3) tensor(0.3333) >>> multilabel_hamming_distance(preds, target, num_labels=3, average=None) @@ -365,13 +362,9 @@ def multilabel_hamming_distance( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_hamming_distance - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_hamming_distance(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.6667, 0.8333]) >>> multilabel_hamming_distance(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -414,8 +407,9 @@ def hamming_distance( the specific details of each argument influence and examples. 
Legacy Example: - >>> target = torch.tensor([[0, 1], [1, 1]]) - >>> preds = torch.tensor([[0, 1], [0, 1]]) + >>> from torch import tensor + >>> target = tensor([[0, 1], [1, 1]]) + >>> preds = tensor([[0, 1], [0, 1]]) >>> hamming_distance(preds, target, task="binary") tensor(0.2500) """ diff --git a/src/torchmetrics/functional/classification/hinge.py b/src/torchmetrics/functional/classification/hinge.py index adc70e9dd39..0d27574b97a 100644 --- a/src/torchmetrics/functional/classification/hinge.py +++ b/src/torchmetrics/functional/classification/hinge.py @@ -103,9 +103,10 @@ def binary_hinge_loss( Set to ``False`` for faster computations. Example: + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_hinge_loss - >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75]) - >>> target = torch.tensor([0, 0, 1, 1, 1]) + >>> preds = tensor([0.25, 0.25, 0.55, 0.75, 0.75]) + >>> target = tensor([0, 0, 1, 1, 1]) >>> binary_hinge_loss(preds, target) tensor(0.6900) >>> binary_hinge_loss(preds, target, squared=True) @@ -219,12 +220,13 @@ def multiclass_hinge_loss( Set to ``False`` for faster computations. Example: + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_hinge_loss - >>> preds = torch.tensor([[0.25, 0.20, 0.55], - ... [0.55, 0.05, 0.40], - ... [0.10, 0.30, 0.60], - ... [0.90, 0.05, 0.05]]) - >>> target = torch.tensor([0, 1, 2, 0]) + >>> preds = tensor([[0.25, 0.20, 0.55], + ... [0.55, 0.05, 0.40], + ... [0.10, 0.30, 0.60], + ... [0.90, 0.05, 0.05]]) + >>> target = tensor([0, 1, 2, 0]) >>> multiclass_hinge_loss(preds, target, num_classes=3) tensor(0.9125) >>> multiclass_hinge_loss(preds, target, num_classes=3, squared=True) @@ -258,19 +260,19 @@ def hinge_loss( each argument influence and examples. Legacy Example: - >>> import torch - >>> target = torch.tensor([0, 1, 1]) - >>> preds = torch.tensor([0.5, 0.7, 0.1]) + >>> from torch import tensor + >>> target = tensor([0, 1, 1]) + >>> preds = tensor([0.5, 0.7, 0.1]) >>> hinge_loss(preds, target, task="binary") tensor(0.9000) - >>> target = torch.tensor([0, 1, 2]) - >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) + >>> target = tensor([0, 1, 2]) + >>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) >>> hinge_loss(preds, target, task="multiclass", num_classes=3) tensor(1.5551) - >>> target = torch.tensor([0, 1, 2]) - >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) + >>> target = tensor([0, 1, 2]) + >>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]]) >>> hinge_loss(preds, target, task="multiclass", num_classes=3, multiclass_mode="one-vs-all") tensor([1.3743, 1.1945, 1.2359]) """ diff --git a/src/torchmetrics/functional/classification/jaccard.py b/src/torchmetrics/functional/classification/jaccard.py index 13daec19cb8..f593a424fd2 100644 --- a/src/torchmetrics/functional/classification/jaccard.py +++ b/src/torchmetrics/functional/classification/jaccard.py @@ -114,16 +114,17 @@ def binary_jaccard_index( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_jaccard_index - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> binary_jaccard_index(preds, target) tensor(0.5000) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_jaccard_index - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0.35, 0.85, 0.48, 0.01]) >>> binary_jaccard_index(preds, target) tensor(0.5000) """ @@ -187,21 +188,20 @@ def multiclass_jaccard_index( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (pred is integer tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_jaccard_index - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_jaccard_index(preds, target, num_classes=3) tensor(0.6667) Example (pred is float tensor): >>> from torchmetrics.functional.classification import multiclass_jaccard_index - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> multiclass_jaccard_index(preds, target, num_classes=3) tensor(0.6667) """ @@ -268,16 +268,17 @@ def multilabel_jaccard_index( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_jaccard_index - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_jaccard_index(preds, target, num_labels=3) tensor(0.5000) Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_jaccard_index - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_jaccard_index(preds, target, num_labels=3) tensor(0.5000) """ @@ -313,8 +314,9 @@ def jaccard_index( the specific details of each argument influence and examples. 
Legacy Example: - >>> target = torch.randint(0, 2, (10, 25, 25)) - >>> pred = torch.tensor(target) + >>> from torch import randint, tensor + >>> target = randint(0, 2, (10, 25, 25)) + >>> pred = tensor(target) >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15] >>> jaccard_index(pred, target, task="multiclass", num_classes=2) tensor(0.9660) diff --git a/src/torchmetrics/functional/classification/matthews_corrcoef.py b/src/torchmetrics/functional/classification/matthews_corrcoef.py index 7535b56d049..cc5f94b70e2 100644 --- a/src/torchmetrics/functional/classification/matthews_corrcoef.py +++ b/src/torchmetrics/functional/classification/matthews_corrcoef.py @@ -83,16 +83,17 @@ def binary_matthews_corrcoef( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_matthews_corrcoef - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> binary_matthews_corrcoef(preds, target) tensor(0.5774) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_matthews_corrcoef - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0.35, 0.85, 0.48, 0.01]) >>> binary_matthews_corrcoef(preds, target) tensor(0.5774) """ @@ -132,21 +133,20 @@ def multiclass_matthews_corrcoef( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (pred is integer tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_matthews_corrcoef(preds, target, num_classes=3) tensor(0.7000) Example (pred is float tensor): >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> multiclass_matthews_corrcoef(preds, target, num_classes=3) tensor(0.7000) """ @@ -188,16 +188,17 @@ def multilabel_matthews_corrcoef( kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_matthews_corrcoef(preds, target, num_labels=3) tensor(0.3333) Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_matthews_corrcoef(preds, target, num_labels=3) tensor(0.3333) """ @@ -228,8 +229,9 @@ def matthews_corrcoef( the specific details of each argument influence and examples. Legacy Example: - >>> target = torch.tensor([1, 1, 0, 0]) - >>> preds = torch.tensor([0, 1, 0, 0]) + >>> from torch import tensor + >>> target = tensor([1, 1, 0, 0]) + >>> preds = tensor([0, 1, 0, 0]) >>> matthews_corrcoef(preds, target, task="multiclass", num_classes=2) tensor(0.5774) """ diff --git a/src/torchmetrics/functional/classification/precision_recall.py b/src/torchmetrics/functional/classification/precision_recall.py index 1c2f20e0e22..0c0ceef895c 100644 --- a/src/torchmetrics/functional/classification/precision_recall.py +++ b/src/torchmetrics/functional/classification/precision_recall.py @@ -108,28 +108,25 @@ def binary_precision( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_precision - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_precision(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_precision - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_precision(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_precision - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... 
[[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_precision(preds, target, multidim_average='samplewise') tensor([0.4000, 0.0000]) """ @@ -209,9 +206,10 @@ def multiclass_precision( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_precision - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_precision(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_precision(preds, target, num_classes=3, average=None) @@ -219,13 +217,11 @@ def multiclass_precision( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_precision - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13]]) >>> multiclass_precision(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_precision(preds, target, num_classes=3, average=None) @@ -233,8 +229,8 @@ def multiclass_precision( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_precision - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.3889, 0.2778]) >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -317,9 +313,10 @@ def multilabel_precision( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_precision - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_precision(preds, target, num_labels=3) tensor(0.5000) >>> multilabel_precision(preds, target, num_labels=3, average=None) @@ -327,8 +324,8 @@ def multilabel_precision( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_precision - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_precision(preds, target, num_labels=3) tensor(0.5000) >>> multilabel_precision(preds, target, num_labels=3, average=None) @@ -336,13 +333,9 @@ def multilabel_precision( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_precision - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... 
) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.3333, 0.0000]) >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -403,28 +396,25 @@ def binary_recall( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_recall - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_recall(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_recall - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_recall(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_recall - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_recall(preds, target, multidim_average='samplewise') tensor([0.6667, 0.0000]) """ @@ -504,9 +494,10 @@ def multiclass_recall( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_recall - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_recall(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_recall(preds, target, num_classes=3, average=None) @@ -514,13 +505,11 @@ def multiclass_recall( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_recall - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_recall(preds, target, num_classes=3) tensor(0.8333) >>> multiclass_recall(preds, target, num_classes=3, average=None) @@ -528,8 +517,8 @@ def multiclass_recall( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_recall - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.5000, 0.2778]) >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -612,9 +601,10 @@ def multilabel_recall( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_recall - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_recall(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_recall(preds, target, num_labels=3, average=None) @@ -622,8 +612,8 @@ def multilabel_recall( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_recall - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_recall(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_recall(preds, target, num_labels=3, average=None) @@ -631,13 +621,9 @@ def multilabel_recall( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_recall - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.6667, 0.0000]) >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -678,8 +664,9 @@ def precision( each argument influence and examples. Legacy Example: - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> precision(preds, target, task="multiclass", average='macro', num_classes=3) tensor(0.1667) >>> precision(preds, target, task="multiclass", average='micro', num_classes=3) @@ -730,8 +717,9 @@ def recall( each argument influence and examples. 
Legacy Example: - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> recall(preds, target, task="multiclass", average='macro', num_classes=3) tensor(0.3333) >>> recall(preds, target, task="multiclass", average='micro', num_classes=3) diff --git a/src/torchmetrics/functional/classification/specificity.py b/src/torchmetrics/functional/classification/specificity.py index b9be98e100e..666ceb38833 100644 --- a/src/torchmetrics/functional/classification/specificity.py +++ b/src/torchmetrics/functional/classification/specificity.py @@ -105,28 +105,25 @@ def binary_specificity( is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_specificity - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_specificity(preds, target) tensor(0.6667) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_specificity - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_specificity(preds, target) tensor(0.6667) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_specificity - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_specificity(preds, target, multidim_average='samplewise') tensor([0.0000, 0.3333]) """ @@ -206,9 +203,10 @@ def multiclass_specificity( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_specificity - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_specificity(preds, target, num_classes=3) tensor(0.8889) >>> multiclass_specificity(preds, target, num_classes=3, average=None) @@ -216,13 +214,11 @@ def multiclass_specificity( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_specificity - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13]]) >>> multiclass_specificity(preds, target, num_classes=3) tensor(0.8889) >>> multiclass_specificity(preds, target, num_classes=3, average=None) @@ -230,8 +226,8 @@ def multiclass_specificity( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_specificity - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_specificity(preds, target, num_classes=3, multidim_average='samplewise') tensor([0.7500, 0.6556]) >>> multiclass_specificity(preds, target, num_classes=3, multidim_average='samplewise', average=None) @@ -314,9 +310,10 @@ def multilabel_specificity( - If ``average=None/'none'``, the shape will be ``(N, C)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_specificity - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_specificity(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_specificity(preds, target, num_labels=3, average=None) @@ -324,8 +321,8 @@ def multilabel_specificity( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_specificity - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_specificity(preds, target, num_labels=3) tensor(0.6667) >>> multilabel_specificity(preds, target, num_labels=3, average=None) @@ -333,13 +330,9 @@ def multilabel_specificity( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_specificity - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_specificity(preds, target, num_labels=3, multidim_average='samplewise') tensor([0.0000, 0.3333]) >>> multilabel_specificity(preds, target, num_labels=3, multidim_average='samplewise', average=None) @@ -380,8 +373,9 @@ def specificity( details of each argument influence and examples. 
Legacy Example: - >>> preds = torch.tensor([2, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([2, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> specificity(preds, target, task="multiclass", average='macro', num_classes=3) tensor(0.6111) >>> specificity(preds, target, task="multiclass", average='micro', num_classes=3) diff --git a/src/torchmetrics/functional/classification/stat_scores.py b/src/torchmetrics/functional/classification/stat_scores.py index 6571aaccbac..4f768dd6abe 100644 --- a/src/torchmetrics/functional/classification/stat_scores.py +++ b/src/torchmetrics/functional/classification/stat_scores.py @@ -181,28 +181,25 @@ def binary_stat_scores( - If ``multidim_average`` is set to ``samplewise``, the shape will be ``(N, 5)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import binary_stat_scores - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0, 0, 1, 1, 0, 1]) >>> binary_stat_scores(preds, target) tensor([2, 1, 2, 1, 3]) Example (preds is float tensor): >>> from torchmetrics.functional.classification import binary_stat_scores - >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) - >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> target = tensor([0, 1, 0, 1, 0, 1]) + >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) >>> binary_stat_scores(preds, target) tensor([2, 1, 2, 1, 3]) Example (multidim tensors): >>> from torchmetrics.functional.classification import binary_stat_scores - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> binary_stat_scores(preds, target, multidim_average='samplewise') tensor([[2, 3, 0, 1, 3], [0, 2, 1, 3, 3]]) @@ -514,9 +511,10 @@ def multiclass_stat_scores( - If ``average=None/'none'``, the shape will be ``(N, C, 5)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multiclass_stat_scores - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([2, 1, 0, 1]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([2, 1, 0, 1]) >>> multiclass_stat_scores(preds, target, num_classes=3, average='micro') tensor([3, 1, 7, 1, 4]) >>> multiclass_stat_scores(preds, target, num_classes=3, average=None) @@ -526,13 +524,11 @@ def multiclass_stat_scores( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multiclass_stat_scores - >>> target = torch.tensor([2, 1, 0, 0]) - >>> preds = torch.tensor([ - ... [0.16, 0.26, 0.58], - ... [0.22, 0.61, 0.17], - ... [0.71, 0.09, 0.20], - ... [0.05, 0.82, 0.13], - ... ]) + >>> target = tensor([2, 1, 0, 0]) + >>> preds = tensor([[0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ...
[0.05, 0.82, 0.13]]) >>> multiclass_stat_scores(preds, target, num_classes=3, average='micro') tensor([3, 1, 7, 1, 4]) >>> multiclass_stat_scores(preds, target, num_classes=3, average=None) @@ -542,8 +538,8 @@ def multiclass_stat_scores( Example (multidim tensors): >>> from torchmetrics.functional.classification import multiclass_stat_scores - >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) - >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) >>> multiclass_stat_scores(preds, target, num_classes=3, multidim_average='samplewise', average='micro') tensor([[3, 3, 9, 3, 6], [2, 4, 8, 4, 6]]) @@ -770,9 +766,10 @@ def multilabel_stat_scores( - If ``average=None/'none'``, the shape will be ``(N, C, 5)`` Example (preds is int tensor): + >>> from torch import tensor >>> from torchmetrics.functional.classification import multilabel_stat_scores - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0, 0, 1], [1, 0, 1]]) >>> multilabel_stat_scores(preds, target, num_labels=3, average='micro') tensor([2, 1, 2, 1, 3]) >>> multilabel_stat_scores(preds, target, num_labels=3, average=None) @@ -782,8 +779,8 @@ def multilabel_stat_scores( Example (preds is float tensor): >>> from torchmetrics.functional.classification import multilabel_stat_scores - >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) - >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> target = tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) >>> multilabel_stat_scores(preds, target, num_labels=3, average='micro') tensor([2, 1, 2, 1, 3]) >>> multilabel_stat_scores(preds, target, num_labels=3, average=None) @@ -793,13 +790,9 @@ def multilabel_stat_scores( Example (multidim tensors): >>> from torchmetrics.functional.classification import multilabel_stat_scores - >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) - >>> preds = torch.tensor( - ... [ - ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], - ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], - ... ] - ... ) + >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]]) >>> multilabel_stat_scores(preds, target, num_labels=3, multidim_average='samplewise', average='micro') tensor([[2, 3, 0, 1, 3], [0, 2, 1, 3, 3]]) @@ -1097,8 +1090,9 @@ def stat_scores( details of each argument influence and examples. 
Legacy Example: - >>> preds = torch.tensor([1, 0, 2, 1]) - >>> target = torch.tensor([1, 1, 2, 0]) + >>> from torch import tensor + >>> preds = tensor([1, 0, 2, 1]) + >>> target = tensor([1, 1, 2, 0]) >>> stat_scores(preds, target, task='multiclass', num_classes=3, average='micro') tensor([2, 2, 6, 2, 4]) >>> stat_scores(preds, target, task='multiclass', num_classes=3, average=None) diff --git a/src/torchmetrics/functional/regression/kl_divergence.py b/src/torchmetrics/functional/regression/kl_divergence.py index 4a4da4b8b07..a7d64ebfa91 100644 --- a/src/torchmetrics/functional/regression/kl_divergence.py +++ b/src/torchmetrics/functional/regression/kl_divergence.py @@ -102,9 +102,9 @@ def kl_divergence( - ``'none'`` or ``None``: Returns score per sample Example: - >>> import torch - >>> p = torch.tensor([[0.36, 0.48, 0.16]]) - >>> q = torch.tensor([[1/3, 1/3, 1/3]]) + >>> from torch import tensor + >>> p = tensor([[0.36, 0.48, 0.16]]) + >>> q = tensor([[1/3, 1/3, 1/3]]) >>> kl_divergence(p, q) tensor(0.0853) """ diff --git a/src/torchmetrics/functional/retrieval/hit_rate.py b/src/torchmetrics/functional/retrieval/hit_rate.py index 5712b2c8e79..f8cdf857e50 100644 --- a/src/torchmetrics/functional/retrieval/hit_rate.py +++ b/src/torchmetrics/functional/retrieval/hit_rate.py @@ -14,7 +14,7 @@ from typing import Optional import torch -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.utilities.checks import _check_retrieval_functional_inputs @@ -40,6 +40,7 @@ def retrieval_hit_rate(preds: Tensor, target: Tensor, k: Optional[int] = None) - If ``k`` parameter is not `None` or an integer larger than 0 Example: + >>> from torch import tensor >>> preds = tensor([0.2, 0.3, 0.5]) >>> target = tensor([True, False, True]) >>> retrieval_hit_rate(preds, target, k=2) diff --git a/src/torchmetrics/functional/retrieval/precision_recall_curve.py b/src/torchmetrics/functional/retrieval/precision_recall_curve.py index 74455a7dd24..eb307d70049 100644 --- a/src/torchmetrics/functional/retrieval/precision_recall_curve.py +++ b/src/torchmetrics/functional/retrieval/precision_recall_curve.py @@ -14,7 +14,7 @@ from typing import Optional, Tuple import torch -from torch import Tensor, cumsum, tensor +from torch import Tensor, cumsum from torch.nn.functional import pad from torchmetrics.utilities.checks import _check_retrieval_functional_inputs @@ -57,6 +57,7 @@ def retrieval_precision_recall_curve( If ``adaptive_k`` is not boolean. Example: + >>> from torch import tensor >>> from torchmetrics.functional import retrieval_precision_recall_curve >>> preds = tensor([0.2, 0.3, 0.5]) >>> target = tensor([True, False, True]) diff --git a/src/torchmetrics/image/ssim.py b/src/torchmetrics/image/ssim.py index ecd39f33cad..282fb8c2940 100644 --- a/src/torchmetrics/image/ssim.py +++ b/src/torchmetrics/image/ssim.py @@ -60,8 +60,8 @@ class StructuralSimilarityIndexMeasure(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example: - >>> from torchmetrics import StructuralSimilarityIndexMeasure >>> import torch + >>> from torchmetrics import StructuralSimilarityIndexMeasure >>> preds = torch.rand([3, 3, 256, 256]) >>> target = preds * 0.75 >>> ssim = StructuralSimilarityIndexMeasure(data_range=1.0) diff --git a/src/torchmetrics/regression/concordance.py b/src/torchmetrics/regression/concordance.py index c5e1e892646..d6c40ef6c24 100644 --- a/src/torchmetrics/regression/concordance.py +++ b/src/torchmetrics/regression/concordance.py @@ -45,18 +45,17 @@ class ConcordanceCorrCoef(PearsonCorrCoef): Example (single output regression): >>> from torchmetrics import ConcordanceCorrCoef - >>> import torch - >>> target = torch.tensor([3, -0.5, 2, 7]) - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) + >>> from torch import tensor + >>> target = tensor([3, -0.5, 2, 7]) + >>> preds = tensor([2.5, 0.0, 2, 8]) >>> concordance = ConcordanceCorrCoef() >>> concordance(preds, target) tensor(0.9777) Example (multi output regression): >>> from torchmetrics import ConcordanceCorrCoef - >>> import torch - >>> target = torch.tensor([[3, -0.5], [2, 7]]) - >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) + >>> target = tensor([[3, -0.5], [2, 7]]) + >>> preds = tensor([[2.5, 0.0], [2, 8]]) >>> concordance = ConcordanceCorrCoef(num_outputs=2) >>> concordance(preds, target) tensor([0.7273, 0.9887]) diff --git a/src/torchmetrics/regression/cosine_similarity.py b/src/torchmetrics/regression/cosine_similarity.py index 36bd0ec133c..d5d7bd7100c 100644 --- a/src/torchmetrics/regression/cosine_similarity.py +++ b/src/torchmetrics/regression/cosine_similarity.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, List -import torch from torch import Tensor from typing_extensions import Literal @@ -45,9 +44,10 @@ class CosineSimilarity(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example: + >>> from torch import tensor >>> from torchmetrics import CosineSimilarity - >>> target = torch.tensor([[0, 1], [1, 1]]) - >>> preds = torch.tensor([[0, 1], [0, 1]]) + >>> target = tensor([[0, 1], [1, 1]]) + >>> preds = tensor([[0, 1], [0, 1]]) >>> cosine_similarity = CosineSimilarity(reduction = 'mean') >>> cosine_similarity(preds, target) tensor(0.8536) diff --git a/src/torchmetrics/regression/explained_variance.py b/src/torchmetrics/regression/explained_variance.py index 023a1edaf06..69b7b6adbf0 100644 --- a/src/torchmetrics/regression/explained_variance.py +++ b/src/torchmetrics/regression/explained_variance.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, Sequence, Union -import torch from torch import Tensor, tensor from torchmetrics.functional.regression.explained_variance import ( @@ -60,15 +59,16 @@ class ExplainedVariance(Metric): If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. 
Example: + >>> from torch import tensor >>> from torchmetrics import ExplainedVariance - >>> target = torch.tensor([3, -0.5, 2, 7]) - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) + >>> target = tensor([3, -0.5, 2, 7]) + >>> preds = tensor([2.5, 0.0, 2, 8]) >>> explained_variance = ExplainedVariance() >>> explained_variance(preds, target) tensor(0.9572) - >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> target = tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = tensor([[0, 2], [-1, 2], [8, -5]]) >>> explained_variance = ExplainedVariance(multioutput='raw_values') >>> explained_variance(preds, target) tensor([0.9677, 1.0000]) diff --git a/src/torchmetrics/regression/kendall.py b/src/torchmetrics/regression/kendall.py index d3a42de51c2..86782a070e0 100644 --- a/src/torchmetrics/regression/kendall.py +++ b/src/torchmetrics/regression/kendall.py @@ -74,37 +74,34 @@ class KendallRankCorrCoef(Metric): ValueError: If ``t_test=True`` and ``alternative=None`` Example (single output regression): - >>> import torch + >>> from torch import tensor >>> from torchmetrics.regression import KendallRankCorrCoef - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) - >>> target = torch.tensor([3, -0.5, 2, 1]) + >>> preds = tensor([2.5, 0.0, 2, 8]) + >>> target = tensor([3, -0.5, 2, 1]) >>> kendall = KendallRankCorrCoef() >>> kendall(preds, target) tensor(0.3333) Example (multi output regression): - >>> import torch >>> from torchmetrics.regression import KendallRankCorrCoef - >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) - >>> target = torch.tensor([[3, -0.5], [2, 1]]) + >>> preds = tensor([[2.5, 0.0], [2, 8]]) + >>> target = tensor([[3, -0.5], [2, 1]]) >>> kendall = KendallRankCorrCoef(num_outputs=2) >>> kendall(preds, target) tensor([1., 1.]) Example (single output regression with t-test): - >>> import torch >>> from torchmetrics.regression import KendallRankCorrCoef - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) - >>> target = torch.tensor([3, -0.5, 2, 1]) + >>> preds = tensor([2.5, 0.0, 2, 8]) + >>> target = tensor([3, -0.5, 2, 1]) >>> kendall = KendallRankCorrCoef(t_test=True, alternative='two-sided') >>> kendall(preds, target) (tensor(0.3333), tensor(0.4969)) Example (multi output regression with t-test): - >>> import torch >>> from torchmetrics.regression import KendallRankCorrCoef - >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) - >>> target = torch.tensor([[3, -0.5], [2, 1]]) + >>> preds = tensor([[2.5, 0.0], [2, 8]]) + >>> target = tensor([[3, -0.5], [2, 1]]) >>> kendall = KendallRankCorrCoef(t_test=True, alternative='two-sided', num_outputs=2) >>> kendall(preds, target) (tensor([1., 1.]), tensor([nan, nan])) diff --git a/src/torchmetrics/regression/kl_divergence.py b/src/torchmetrics/regression/kl_divergence.py index f37c2313ed1..410dfd41a25 100644 --- a/src/torchmetrics/regression/kl_divergence.py +++ b/src/torchmetrics/regression/kl_divergence.py @@ -63,10 +63,10 @@ class KLDivergence(Metric): Half precision is only supported on GPU for this metric Example: - >>> import torch + >>> from torch import tensor >>> from torchmetrics.functional import kl_divergence - >>> p = torch.tensor([[0.36, 0.48, 0.16]]) - >>> q = torch.tensor([[1/3, 1/3, 1/3]]) + >>> p = tensor([[0.36, 0.48, 0.16]]) + >>> q = tensor([[1/3, 1/3, 1/3]]) >>> kl_divergence(p, q) tensor(0.0853) """ diff --git a/src/torchmetrics/regression/log_mse.py b/src/torchmetrics/regression/log_mse.py index 596016e58cf..b5a69ffdf89 100644 --- a/src/torchmetrics/regression/log_mse.py +++
b/src/torchmetrics/regression/log_mse.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any -import torch from torch import Tensor, tensor from torchmetrics.functional.regression.log_mse import _mean_squared_log_error_compute, _mean_squared_log_error_update @@ -40,9 +39,10 @@ class MeanSquaredLogError(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example: + >>> from torch import tensor >>> from torchmetrics import MeanSquaredLogError - >>> target = torch.tensor([2.5, 5, 4, 8]) - >>> preds = torch.tensor([3, 5, 2.5, 7]) + >>> target = tensor([2.5, 5, 4, 8]) + >>> preds = tensor([3, 5, 2.5, 7]) >>> mean_squared_log_error = MeanSquaredLogError() >>> mean_squared_log_error(preds, target) tensor(0.0397) diff --git a/src/torchmetrics/regression/mae.py b/src/torchmetrics/regression/mae.py index f07da8121b2..33f1b5a1e67 100644 --- a/src/torchmetrics/regression/mae.py +++ b/src/torchmetrics/regression/mae.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any -import torch from torch import Tensor, tensor from torchmetrics.functional.regression.mae import _mean_absolute_error_compute, _mean_absolute_error_update @@ -40,9 +39,10 @@ class MeanAbsoluteError(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example: + >>> from torch import tensor >>> from torchmetrics import MeanAbsoluteError - >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0]) - >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0]) + >>> target = tensor([3.0, -0.5, 2.0, 7.0]) + >>> preds = tensor([2.5, 0.0, 2.0, 8.0]) >>> mean_absolute_error = MeanAbsoluteError() >>> mean_absolute_error(preds, target) tensor(0.5000) diff --git a/src/torchmetrics/regression/mape.py b/src/torchmetrics/regression/mape.py index 2682fc55122..f07441c8182 100644 --- a/src/torchmetrics/regression/mape.py +++ b/src/torchmetrics/regression/mape.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any -import torch from torch import Tensor, tensor from torchmetrics.functional.regression.mape import ( @@ -49,9 +48,10 @@ class MeanAbsolutePercentageError(Metric): This `MAPE implementation returns`_ a very large number instead of ``inf``. Example: + >>> from torch import tensor >>> from torchmetrics import MeanAbsolutePercentageError - >>> target = torch.tensor([1, 10, 1e6]) - >>> preds = torch.tensor([0.9, 15, 1.2e6]) + >>> target = tensor([1, 10, 1e6]) + >>> preds = tensor([0.9, 15, 1.2e6]) >>> mean_abs_percentage_error = MeanAbsolutePercentageError() >>> mean_abs_percentage_error(preds, target) tensor(0.2667) diff --git a/src/torchmetrics/regression/mse.py b/src/torchmetrics/regression/mse.py index 56e33020e25..a6605062687 100644 --- a/src/torchmetrics/regression/mse.py +++ b/src/torchmetrics/regression/mse.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, Optional, Sequence, Union -import torch from torch import Tensor, tensor from torchmetrics.functional.regression.mse import _mean_squared_error_compute, _mean_squared_error_update @@ -46,9 +45,10 @@ class MeanSquaredError(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example: + >>> from torch import tensor >>> from torchmetrics import MeanSquaredError - >>> target = torch.tensor([2.5, 5.0, 4.0, 8.0]) - >>> preds = torch.tensor([3.0, 5.0, 2.5, 7.0]) + >>> target = tensor([2.5, 5.0, 4.0, 8.0]) + >>> preds = tensor([3.0, 5.0, 2.5, 7.0]) >>> mean_squared_error = MeanSquaredError() >>> mean_squared_error(preds, target) tensor(0.8750) @@ -103,23 +103,23 @@ def plot( .. plot:: :scale: 75 + >>> from torch import randn >>> # Example plotting a single value - >>> import torch >>> from torchmetrics.regression import MeanSquaredError >>> metric = MeanSquaredError() - >>> metric.update(torch.randn(10,), torch.randn(10,)) + >>> metric.update(randn(10,), randn(10,)) >>> fig_, ax_ = metric.plot() .. plot:: :scale: 75 + >>> from torch import randn >>> # Example plotting multiple values - >>> import torch >>> from torchmetrics.regression import MeanSquaredError >>> metric = MeanSquaredError() >>> values = [] >>> for _ in range(10): - ... values.append(metric(torch.randn(10,), torch.randn(10,))) + ... values.append(metric(randn(10,), randn(10,))) >>> fig, ax = metric.plot(values) """ val = val or self.compute() diff --git a/src/torchmetrics/regression/spearman.py b/src/torchmetrics/regression/spearman.py index feaeab9e66b..670f02bc816 100644 --- a/src/torchmetrics/regression/spearman.py +++ b/src/torchmetrics/regression/spearman.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Any, List -import torch from torch import Tensor from torchmetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update @@ -46,17 +45,18 @@ class SpearmanCorrCoef(Metric): kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (single output regression): + >>> from torch import tensor >>> from torchmetrics import SpearmanCorrCoef - >>> target = torch.tensor([3, -0.5, 2, 7]) - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) + >>> target = tensor([3, -0.5, 2, 7]) + >>> preds = tensor([2.5, 0.0, 2, 8]) >>> spearman = SpearmanCorrCoef() >>> spearman(preds, target) tensor(1.0000) Example (multi output regression): >>> from torchmetrics import SpearmanCorrCoef - >>> target = torch.tensor([[3, -0.5], [2, 7]]) - >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) + >>> target = tensor([[3, -0.5], [2, 7]]) + >>> preds = tensor([[2.5, 0.0], [2, 8]]) >>> spearman = SpearmanCorrCoef(num_outputs=2) >>> spearman(preds, target) tensor([1.0000, 1.0000]) diff --git a/src/torchmetrics/retrieval/average_precision.py b/src/torchmetrics/retrieval/average_precision.py index b21e59e1f59..bd52e24c5e9 100644 --- a/src/torchmetrics/retrieval/average_precision.py +++ b/src/torchmetrics/retrieval/average_precision.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision from torchmetrics.retrieval.base import RetrievalMetric @@ -53,6 +53,7 @@ class RetrievalMAP(RetrievalMetric): If ``ignore_index`` is not `None` or an integer. 
Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalMAP >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/hit_rate.py b/src/torchmetrics/retrieval/hit_rate.py index 0256c951e4c..dfc9f9c4bfa 100644 --- a/src/torchmetrics/retrieval/hit_rate.py +++ b/src/torchmetrics/retrieval/hit_rate.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Optional -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.hit_rate import retrieval_hit_rate from torchmetrics.retrieval.base import RetrievalMetric @@ -58,6 +58,7 @@ class RetrievalHitRate(RetrievalMetric): If ``k`` parameter is not `None` or an integer larger than 0. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalHitRate >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/ndcg.py b/src/torchmetrics/retrieval/ndcg.py index e20d5493400..65acfeb1262 100644 --- a/src/torchmetrics/retrieval/ndcg.py +++ b/src/torchmetrics/retrieval/ndcg.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Optional -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg from torchmetrics.retrieval.base import RetrievalMetric @@ -58,6 +58,7 @@ class RetrievalNormalizedDCG(RetrievalMetric): If ``k`` parameter is not `None` or an integer larger than 0. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalNormalizedDCG >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/precision.py b/src/torchmetrics/retrieval/precision.py index ef004c26b10..09a1c823b1a 100644 --- a/src/torchmetrics/retrieval/precision.py +++ b/src/torchmetrics/retrieval/precision.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Optional -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.precision import retrieval_precision from torchmetrics.retrieval.base import RetrievalMetric @@ -61,6 +61,7 @@ class RetrievalPrecision(RetrievalMetric): If ``adaptive_k`` is not boolean. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalPrecision >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/precision_recall_curve.py b/src/torchmetrics/retrieval/precision_recall_curve.py index ca1b017a5ac..82a0ce5be5d 100644 --- a/src/torchmetrics/retrieval/precision_recall_curve.py +++ b/src/torchmetrics/retrieval/precision_recall_curve.py @@ -14,7 +14,7 @@ from typing import Any, Optional, Tuple import torch -from torch import Tensor, tensor +from torch import Tensor from torchmetrics import Metric from torchmetrics.functional.retrieval.precision_recall_curve import retrieval_precision_recall_curve @@ -101,6 +101,7 @@ class RetrievalPrecisionRecallCurve(Metric): If ``max_k`` parameter is not `None` or an integer larger than 0. 
Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalPrecisionRecallCurve >>> indexes = tensor([0, 0, 0, 0, 1, 1, 1]) >>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5]) @@ -256,6 +257,7 @@ class RetrievalRecallAtFixedPrecision(RetrievalPrecisionRecallCurve): If ``max_k`` parameter is not `None` or an integer larger than 0. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalRecallAtFixedPrecision >>> indexes = tensor([0, 0, 0, 0, 1, 1, 1]) >>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5]) diff --git a/src/torchmetrics/retrieval/r_precision.py b/src/torchmetrics/retrieval/r_precision.py index 0d88722ed27..2ec79dad0a4 100644 --- a/src/torchmetrics/retrieval/r_precision.py +++ b/src/torchmetrics/retrieval/r_precision.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision from torchmetrics.retrieval.base import RetrievalMetric @@ -53,6 +53,7 @@ class RetrievalRPrecision(RetrievalMetric): If ``ignore_index`` is not `None` or an integer. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalRPrecision >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/recall.py b/src/torchmetrics/retrieval/recall.py index e9724621dd4..8222fa9f8eb 100644 --- a/src/torchmetrics/retrieval/recall.py +++ b/src/torchmetrics/retrieval/recall.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Optional -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.recall import retrieval_recall from torchmetrics.retrieval.base import RetrievalMetric @@ -57,6 +57,7 @@ class RetrievalRecall(RetrievalMetric): If ``k`` parameter is not `None` or an integer larger than 0. Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalRecall >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/retrieval/reciprocal_rank.py b/src/torchmetrics/retrieval/reciprocal_rank.py index 7974e613696..3882f180202 100644 --- a/src/torchmetrics/retrieval/reciprocal_rank.py +++ b/src/torchmetrics/retrieval/reciprocal_rank.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from torch import Tensor, tensor +from torch import Tensor from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank from torchmetrics.retrieval.base import RetrievalMetric @@ -52,6 +52,7 @@ class RetrievalMRR(RetrievalMetric): If ``ignore_index`` is not `None` or an integer. 
Example: + >>> from torch import tensor >>> from torchmetrics import RetrievalMRR >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py index e4fd999a323..77003f78001 100644 --- a/src/torchmetrics/wrappers/classwise.py +++ b/src/torchmetrics/wrappers/classwise.py @@ -41,7 +41,6 @@ class ClasswiseWrapper(Metric): 'multiclassaccuracy_2': tensor(0.)} Example (labels as list of strings): - >>> import torch >>> from torchmetrics import ClasswiseWrapper >>> from torchmetrics.classification import MulticlassAccuracy >>> metric = ClasswiseWrapper( @@ -56,7 +55,6 @@ class ClasswiseWrapper(Metric): 'multiclassaccuracy_dog': tensor(0.)} Example (in metric collection): - >>> import torch >>> from torchmetrics import ClasswiseWrapper, MetricCollection >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassRecall >>> labels = ["horse", "fish", "dog"] diff --git a/tests/unittests/image/test_lpips.py b/tests/unittests/image/test_lpips.py index 5cdabba73e4..8402e73d0e6 100644 --- a/tests/unittests/image/test_lpips.py +++ b/tests/unittests/image/test_lpips.py @@ -20,17 +20,17 @@ from torch import Tensor from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity -from torchmetrics.utilities.imports import _LPIPS_AVAILABLE +from torchmetrics.utilities.imports import _LPIPS_AVAILABLE, _TORCH_GREATER_EQUAL_1_9 from unittests.helpers import seed_all -from unittests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester +from unittests.helpers.testers import MetricTester seed_all(42) Input = namedtuple("Input", ["img1", "img2"]) _inputs = Input( - img1=torch.rand(int(NUM_BATCHES * 0.4), int(BATCH_SIZE / 16), 3, 100, 100), - img2=torch.rand(int(NUM_BATCHES * 0.4), int(BATCH_SIZE / 16), 3, 100, 100), + img1=torch.rand(4, 2, 3, 100, 100), + img2=torch.rand(4, 2, 3, 100, 100), ) @@ -70,10 +70,11 @@ def test_lpips_differentiability(self): preds=_inputs.img1, target=_inputs.img2, metric_module=LearnedPerceptualImagePatchSimilarity ) - # LPIPS half + cpu does not work due to missing support in torch.min - @pytest.mark.xfail(reason="LPIPS metric does not support cpu + half precision") + # LPIPS half + cpu does not work due to missing support in torch.min for older versions of torch def test_lpips_half_cpu(self): """test for half + cpu support.""" + if not _TORCH_GREATER_EQUAL_1_9: + pytest.xfail(reason="LPIPS metric does not support cpu + half precision for PyTorch v1.8.1 or lower") self.run_precision_test_cpu(_inputs.img1, _inputs.img2, LearnedPerceptualImagePatchSimilarity) @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda") diff --git a/tests/unittests/regression/test_tweedie_deviance.py b/tests/unittests/regression/test_tweedie_deviance.py index a4d2c2097db..5b5ca6ce9f4 100644 --- a/tests/unittests/regression/test_tweedie_deviance.py +++ b/tests/unittests/regression/test_tweedie_deviance.py @@ -21,6 +21,7 @@ from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore +from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_9 from unittests.helpers import seed_all from unittests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester @@ -82,14 +83,15 @@ def test_deviance_scores_functional(self, preds, targets, power): metric_args=dict(power=power), ) - def 
test_pearson_corrcoef_differentiability(self, preds, targets, power): + def test_deviance_scores_differentiability(self, preds, targets, power): self.run_differentiability_test( preds, targets, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score ) - # Tweedie Deviance Score half + cpu does not work due to missing support in torch.log - @pytest.mark.xfail(reason="TweedieDevianceScore metric does not support cpu + half precision") - def test_pearson_corrcoef_half_cpu(self, preds, targets, power): + # Tweedie Deviance Score half + cpu does not work for power in [1, 2] due to missing support in torch.log + def test_deviance_scores_half_cpu(self, preds, targets, power): + if not _TORCH_GREATER_EQUAL_1_9 or power in [1, 2]: + pytest.xfail(reason="TweedieDevianceScore metric does not support cpu + half precision for older PyTorch or for power in [1, 2]") metric_args = {"power": power} self.run_precision_test_cpu( preds, @@ -100,7 +102,7 @@ def test_pearson_corrcoef_half_cpu(self, preds, targets, power): metric_args=metric_args, ) @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda") - def test_pearson_corrcoef_half_gpu(self, preds, targets, power): + def test_deviance_scores_half_gpu(self, preds, targets, power): metric_args = {"power": power} self.run_precision_test_gpu( preds,
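
The two test diffs above share one pattern: a blanket `@pytest.mark.xfail` decorator is replaced by an in-test `pytest.xfail()` call behind a runtime condition, so the expected failure is declared only on configurations that genuinely lack support, and the test must still pass everywhere else. A minimal standalone sketch of that pattern, not part of the patch — the version flag is recomputed here rather than imported from torchmetrics.utilities.imports, and the `torch.min` call merely stands in for the real metric code:

    import pytest
    import torch

    # Stand-in for torchmetrics.utilities.imports._TORCH_GREATER_EQUAL_1_9
    _TORCH_GREATER_EQUAL_1_9 = tuple(int(v) for v in torch.__version__.split(".")[:2]) >= (1, 9)


    @pytest.mark.xfail(reason="declared an expected failure on every configuration")
    def test_half_cpu_decorator_style():
        # Old style: the xfail applies unconditionally, even on builds where the op works.
        torch.min(torch.randn(4).half(), torch.randn(4).half())


    def test_half_cpu_conditional_style():
        # New style: xfail only where support is actually missing; on newer
        # torch the body runs as an ordinary test and its result counts.
        if not _TORCH_GREATER_EQUAL_1_9:
            pytest.xfail(reason="half + cpu torch.min is unsupported before PyTorch 1.9")
        torch.min(torch.randn(4).half(), torch.randn(4).half())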
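
Most of the docstring churn above follows a single convention: import `tensor` in the first doctest block of a docstring and drop the per-example `import torch` from the blocks that follow (as in kendall.py and spearman.py). That is safe because doctest executes all examples of one docstring in a shared namespace, so a name imported in the first block remains visible in later ones. A toy illustration of that behavior, using a hypothetical `demo` function that is not part of the patch:

    def demo():
        """Show that doctest blocks in one docstring share a namespace.

        Example (first block does the import):
            >>> from torch import tensor
            >>> tensor([1, 2]).sum()
            tensor(3)

        Example (second block reuses it without re-importing):
            >>> tensor([3, 4]).sum()
            tensor(7)
        """


    if __name__ == "__main__":
        import doctest

        doctest.testmod()  # both blocks pass; `tensor` carries over between them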