Typing fixes #17000

Merged 1 commit on Mar 8, 2023
3 changes: 3 additions & 0 deletions src/lightning/pytorch/CHANGELOG.md
@@ -153,6 +153,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Changed minimum supported version of `rich` from `10.14.0` to `12.13.0` ([#16798](https://github.com/Lightning-AI/lightning/pull/16798))


- The `ServableModule` is now an abstract interface ([#17000](https://github.com/Lightning-AI/lightning/pull/17000))


### Deprecated

-
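The changelog entry above means `ServableModule` can no longer be instantiated directly: constructing it without implementing its hooks now fails at construction time. A minimal sketch of the new behavior, assuming the module path shown in this diff:

```python
# Hedged sketch: with ServableModule now an ABC, direct instantiation raises
# TypeError because its four hooks are abstract.
from lightning.pytorch.serve.servable_module import ServableModule

try:
    ServableModule()
except TypeError as err:
    print(err)  # e.g. "Can't instantiate abstract class ServableModule ..."
```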
7 changes: 6 additions & 1 deletion src/lightning/pytorch/serve/servable_module.py
@@ -1,10 +1,11 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Tuple

import torch
from torch import Tensor


class ServableModule(torch.nn.Module):
class ServableModule(ABC, torch.nn.Module):

"""The ServableModule provides a simple API to make your model servable.

@@ -56,9 +57,11 @@ def configure_response(self):
assert serve_cb.resp.json() == {"output": [0, 1]}
"""

@abstractmethod
def configure_payload(self) -> Dict[str, Any]:
"""Returns a request payload as a dictionary."""

@abstractmethod
def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callable]]:
"""Returns a tuple of dictionaries.

@@ -69,6 +72,7 @@ def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callable]]:
and the associated serialization function (e.g. a function to convert tensors into a payload).
"""

@abstractmethod
def serve_step(self, *args: Tensor, **kwargs: Tensor) -> Dict[str, Tensor]:
r"""
Returns the predictions of your model as a dictionary.
@@ -86,5 +90,6 @@ def serve_step(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
- ``dict`` - A dictionary with their associated tensors.
"""

@abstractmethod
def configure_response(self) -> Dict[str, Any]:
"""Returns a response to validate the server response."""
6 changes: 2 additions & 4 deletions src/lightning/pytorch/utilities/parsing.py
@@ -45,14 +45,12 @@ def clean_namespace(hparams: MutableMapping) -> None:
del hparams[k]


def parse_class_init_keys(
cls: Union[Type["pl.LightningModule"], Type["pl.LightningDataModule"]]
) -> Tuple[str, Optional[str], Optional[str]]:
def parse_class_init_keys(cls: Type) -> Tuple[str, Optional[str], Optional[str]]:
"""Parse key words for standard ``self``, ``*args`` and ``**kwargs``.

Examples:

>>> class Model():
>>> class Model:
... def __init__(self, hparams, *my_args, anykw=42, **my_kwargs):
... pass
>>> parse_class_init_keys(Model)
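With the hint relaxed from the Lightning-specific types to a plain `Type`, the helper can be applied (and type-checked) against any class. A quick sketch; the `Plain` class is made up:

```python
# Hedged sketch: parse_class_init_keys inspects any class's __init__ and
# returns the names used for self, *args and **kwargs.
from lightning.pytorch.utilities.parsing import parse_class_init_keys


class Plain:
    def __init__(self, value, *extras, **options):
        pass


print(parse_class_init_keys(Plain))  # expected: ('self', 'extras', 'options')
```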
300 changes: 145 additions & 155 deletions src/lightning/pytorch/utilities/testing/_runif.py
@@ -17,7 +17,7 @@
from typing import Any, Optional

import torch
from lightning_utilities.core.imports import compare_version, module_available, RequirementCache
from lightning_utilities.core.imports import compare_version, RequirementCache
from packaging.version import Version

from lightning.fabric.accelerators.cuda import num_cuda_devices
@@ -31,15 +31,28 @@
from lightning.pytorch.strategies.deepspeed import _DEEPSPEED_AVAILABLE
from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE, _PSUTIL_AVAILABLE

if module_available("_pytest"):
from _pytest.mark import MarkDecorator
else:
MarkDecorator = Any

_SKLEARN_AVAILABLE = RequirementCache("scikit-learn")


class _RunIf:
def _RunIf(
min_cuda_gpus: int = 0,
min_torch: Optional[str] = None,
max_torch: Optional[str] = None,
min_python: Optional[str] = None,
bf16_cuda: bool = False,
tpu: bool = False,
ipu: bool = False,
hpu: bool = False,
mps: Optional[bool] = None,
skip_windows: bool = False,
standalone: bool = False,
deepspeed: bool = False,
rich: bool = False,
omegaconf: bool = False,
psutil: bool = False,
sklearn: bool = False,
onnx: bool = False,
) -> Any:  # not the real return type, since annotating it would require pytest to be available
"""Wrapper around ``pytest.mark.skipif`` with specific conditions.

Example:
@@ -48,152 +61,129 @@ class _RunIf:
@pytest.mark.parametrize("arg1", [1, 2.0])
def test_wrapper(arg1):
assert arg1 > 0.0
"""

def __new__(
self,
min_cuda_gpus: int = 0,
min_torch: Optional[str] = None,
max_torch: Optional[str] = None,
min_python: Optional[str] = None,
bf16_cuda: bool = False,
tpu: bool = False,
ipu: bool = False,
hpu: bool = False,
mps: Optional[bool] = None,
skip_windows: bool = False,
standalone: bool = False,
deepspeed: bool = False,
rich: bool = False,
omegaconf: bool = False,
psutil: bool = False,
sklearn: bool = False,
onnx: bool = False,
) -> MarkDecorator: # not the real return because it would require that pytest is available
"""Configure.

Args:
min_cuda_gpus: Require this number of GPUs and that the ``PL_RUN_CUDA_TESTS=1`` environment variable is set.
min_torch: Require that PyTorch is greater than or equal to this version.
max_torch: Require that PyTorch is less than this version.
min_python: Require that Python is greater than or equal to this version.
bf16_cuda: Require that CUDA device supports bf16.
tpu: Require that TPU is available.
ipu: Require that IPU is available and that the ``PL_RUN_IPU_TESTS=1`` environment variable is set.
hpu: Require that HPU is available.
mps: If True, require that MPS (Apple Silicon) is available; if False, require that MPS is not available.
skip_windows: Skip on the Windows platform.
standalone: Mark the test as standalone; our CI will run it in a separate process.
This requires that the ``PL_RUN_STANDALONE_TESTS=1`` environment variable is set.
deepspeed: Require that microsoft/DeepSpeed is installed.
rich: Require that willmcgugan/rich is installed.
omegaconf: Require that omry/omegaconf is installed.
psutil: Require that psutil is installed.
sklearn: Require that scikit-learn is installed.
onnx: Require that onnx is installed.
"""
import pytest

conditions = []
reasons = []
kwargs: dict = {}  # required for our CI to run the tests under the different PL_RUN_X_TESTS flags

if min_cuda_gpus:
conditions.append(num_cuda_devices() < min_cuda_gpus)
reasons.append(f"GPUs>={min_cuda_gpus}")
# used in conftest.py::pytest_collection_modifyitems
kwargs["min_cuda_gpus"] = True

if min_torch:
# set use_base_version for nightly support
conditions.append(compare_version("torch", operator.lt, min_torch, use_base_version=True))
reasons.append(f"torch>={min_torch}, {torch.__version__} installed")

if max_torch:
# set use_base_version for nightly support
conditions.append(compare_version("torch", operator.ge, max_torch, use_base_version=True))
reasons.append(f"torch<{max_torch}, {torch.__version__} installed")

if min_python:
py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
conditions.append(Version(py_version) < Version(min_python))
reasons.append(f"python>={min_python}")

if bf16_cuda:
try:
cond = not (torch.cuda.is_available() and torch.cuda.is_bf16_supported())
except (AssertionError, RuntimeError) as e:
# AssertionError: Torch not compiled with CUDA enabled
# RuntimeError: Found no NVIDIA driver on your system.
is_unrelated = "Found no NVIDIA driver" not in str(e) and "Torch not compiled with CUDA" not in str(e)
if is_unrelated:
raise e
cond = True

conditions.append(cond)
reasons.append("CUDA device bf16")

if skip_windows:
conditions.append(sys.platform == "win32")
reasons.append("unimplemented on Windows")

if tpu:
conditions.append(not TPUAccelerator.is_available())
reasons.append("TPU")
# used in conftest.py::pytest_collection_modifyitems
kwargs["tpu"] = True

if ipu:
conditions.append(not _IPU_AVAILABLE)
reasons.append("IPU")
# used in conftest.py::pytest_collection_modifyitems
kwargs["ipu"] = True

if hpu:
conditions.append(not _HPU_AVAILABLE)
reasons.append("HPU")

if mps is not None:
if mps:
conditions.append(not MPSAccelerator.is_available())
reasons.append("MPS")
else:
conditions.append(MPSAccelerator.is_available())
reasons.append("not MPS")

if standalone:
env_flag = os.getenv("PL_RUN_STANDALONE_TESTS", "0")
conditions.append(env_flag != "1")
reasons.append("Standalone execution")
# used in conftest.py::pytest_collection_modifyitems
kwargs["standalone"] = True

if deepspeed:
conditions.append(not _DEEPSPEED_AVAILABLE)
reasons.append("Deepspeed")

if rich:
conditions.append(not _RICH_AVAILABLE)
reasons.append("Rich")

if omegaconf:
conditions.append(not _OMEGACONF_AVAILABLE)
reasons.append("omegaconf")

if psutil:
conditions.append(not _PSUTIL_AVAILABLE)
reasons.append("psutil")

if sklearn:
conditions.append(not _SKLEARN_AVAILABLE)
reasons.append("scikit-learn")

if onnx:
conditions.append(_TORCH_GREATER_EQUAL_2_0 and not _ONNX_AVAILABLE)
reasons.append("onnx")

reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
kwargs.pop("condition", None)
kwargs.pop("reason", None)
return pytest.mark.skipif(condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs)
Args:
min_cuda_gpus: Require this number of GPUs and that the ``PL_RUN_CUDA_TESTS=1`` environment variable is set.
min_torch: Require that PyTorch is greater than or equal to this version.
max_torch: Require that PyTorch is less than this version.
min_python: Require that Python is greater than or equal to this version.
bf16_cuda: Require that CUDA device supports bf16.
tpu: Require that TPU is available.
ipu: Require that IPU is available and that the ``PL_RUN_IPU_TESTS=1`` environment variable is set.
hpu: Require that HPU is available.
mps: If True, require that MPS (Apple Silicon) is available; if False, require that MPS is not available.
skip_windows: Skip on the Windows platform.
standalone: Mark the test as standalone; our CI will run it in a separate process.
This requires that the ``PL_RUN_STANDALONE_TESTS=1`` environment variable is set.
deepspeed: Require that microsoft/DeepSpeed is installed.
rich: Require that willmcgugan/rich is installed.
omegaconf: Require that omry/omegaconf is installed.
psutil: Require that psutil is installed.
sklearn: Require that scikit-learn is installed.
onnx: Require that onnx is installed.
"""
import pytest

conditions = []
reasons = []
kwargs: dict = {}  # required for our CI to run the tests under the different PL_RUN_X_TESTS flags

if min_cuda_gpus:
conditions.append(num_cuda_devices() < min_cuda_gpus)
reasons.append(f"GPUs>={min_cuda_gpus}")
# used in conftest.py::pytest_collection_modifyitems
kwargs["min_cuda_gpus"] = True

if min_torch:
# set use_base_version for nightly support
conditions.append(compare_version("torch", operator.lt, min_torch, use_base_version=True))
reasons.append(f"torch>={min_torch}, {torch.__version__} installed")

if max_torch:
# set use_base_version for nightly support
conditions.append(compare_version("torch", operator.ge, max_torch, use_base_version=True))
reasons.append(f"torch<{max_torch}, {torch.__version__} installed")

if min_python:
py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
conditions.append(Version(py_version) < Version(min_python))
reasons.append(f"python>={min_python}")

if bf16_cuda:
try:
cond = not (torch.cuda.is_available() and torch.cuda.is_bf16_supported())
except (AssertionError, RuntimeError) as e:
# AssertionError: Torch not compiled with CUDA enabled
# RuntimeError: Found no NVIDIA driver on your system.
is_unrelated = "Found no NVIDIA driver" not in str(e) and "Torch not compiled with CUDA" not in str(e)
if is_unrelated:
raise e
cond = True

conditions.append(cond)
reasons.append("CUDA device bf16")

if skip_windows:
conditions.append(sys.platform == "win32")
reasons.append("unimplemented on Windows")

if tpu:
conditions.append(not TPUAccelerator.is_available())
reasons.append("TPU")
# used in conftest.py::pytest_collection_modifyitems
kwargs["tpu"] = True

if ipu:
conditions.append(not _IPU_AVAILABLE)
reasons.append("IPU")
# used in conftest.py::pytest_collection_modifyitems
kwargs["ipu"] = True

if hpu:
conditions.append(not _HPU_AVAILABLE)
reasons.append("HPU")

if mps is not None:
if mps:
conditions.append(not MPSAccelerator.is_available())
reasons.append("MPS")
else:
conditions.append(MPSAccelerator.is_available())
reasons.append("not MPS")

if standalone:
env_flag = os.getenv("PL_RUN_STANDALONE_TESTS", "0")
conditions.append(env_flag != "1")
reasons.append("Standalone execution")
# used in conftest.py::pytest_collection_modifyitems
kwargs["standalone"] = True

if deepspeed:
conditions.append(not _DEEPSPEED_AVAILABLE)
reasons.append("Deepspeed")

if rich:
conditions.append(not _RICH_AVAILABLE)
reasons.append("Rich")

if omegaconf:
conditions.append(not _OMEGACONF_AVAILABLE)
reasons.append("omegaconf")

if psutil:
conditions.append(not _PSUTIL_AVAILABLE)
reasons.append("psutil")

if sklearn:
conditions.append(not _SKLEARN_AVAILABLE)
reasons.append("scikit-learn")

if onnx:
conditions.append(_TORCH_GREATER_EQUAL_2_0 and not _ONNX_AVAILABLE)
reasons.append("onnx")

reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
kwargs.pop("condition", None)
kwargs.pop("reason", None)
return pytest.mark.skipif(condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs)
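The refactor from a class with ``__new__`` to a plain function is transparent to callers: `_RunIf` still returns a ``pytest.mark.skipif`` mark and is applied exactly as before. A usage sketch along the lines of the docstring example (the test body is illustrative):

```python
# Hedged usage sketch: applying the refactored _RunIf to a test.
import pytest

from lightning.pytorch.utilities.testing._runif import _RunIf


@_RunIf(min_torch="2.0.0", skip_windows=True)
@pytest.mark.parametrize("arg1", [1, 2.0])
def test_wrapper(arg1):
    # Skipped unless torch >= 2.0.0 and the platform is not Windows;
    # the generated skip reason reads like "Requires: [torch>=2.0.0]".
    assert arg1 > 0.0
```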