Skip to content

Commit

Permalink
Remove deprecated auto_select_gpus Trainer argument (#16184)
Browse files Browse the repository at this point in the history
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
2 people authored and carmocca committed Jan 4, 2023
1 parent a162f81 commit 1e00e54
Show file tree
Hide file tree
Showing 8 changed files with 5 additions and 190 deletions.
4 changes: 4 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
* Removed the `Trainer(ipus=...)` argument
* Removed the `Trainer(num_processes=...)` argument

- Removed the deprecated automatic GPU selection ([#16184](https://github.com/Lightning-AI/lightning/pull/16184))
* Removed the `Trainer(auto_select_gpus=...)` argument
* Removed the `pytorch_lightning.tuner.auto_gpu_select.{pick_single_gpu,pick_multiple_gpus}` functions


- Removed the deprecated `resume_from_checkpoint` Trainer argument ([#16167](https://github.com/Lightning-AI/lightning/pull/16167))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,9 @@
TPUSpawnStrategy,
)
from pytorch_lightning.strategies.ddp_spawn import _DDP_FORK_ALIASES
from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _IPU_AVAILABLE
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn

log = logging.getLogger(__name__)

Expand All @@ -98,7 +97,6 @@ def __init__(
benchmark: Optional[bool] = None,
replace_sampler_ddp: bool = True,
deterministic: Optional[Union[bool, _LITERAL_WARN]] = False,
auto_select_gpus: Optional[bool] = None, # TODO: Remove in v1.10.0
) -> None:
"""The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
components such as the Accelerator and Precision plugins.
Expand Down Expand Up @@ -428,7 +426,6 @@ def _set_parallel_devices_and_init_accelerator(self) -> None:
)

self._set_devices_flag_if_auto_passed()
self._set_devices_flag_if_auto_select_gpus_passed()
self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
if not self._parallel_devices:
self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)
Expand All @@ -437,24 +434,6 @@ def _set_devices_flag_if_auto_passed(self) -> None:
if self._devices_flag == "auto" or self._devices_flag is None:
self._devices_flag = self.accelerator.auto_device_count()

def _set_devices_flag_if_auto_select_gpus_passed(self) -> None:
if self._auto_select_gpus is not None:
rank_zero_deprecation(
"The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0 and will be removed in v1.10.0."
" Please use the function `pytorch_lightning.accelerators.find_usable_cuda_devices` instead."
)
if (
self._auto_select_gpus
and isinstance(self._devices_flag, int)
and isinstance(self.accelerator, CUDAAccelerator)
):
self._devices_flag = pick_multiple_gpus(
self._devices_flag,
# we already show a deprecation message when user sets Trainer(auto_select_gpus=...)
_show_deprecation=False,
)
log.info(f"Auto select gpus: {self._devices_flag}")

def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
if isinstance(self._cluster_environment_flag, ClusterEnvironment):
return self._cluster_environment_flag
Expand Down
2 changes: 0 additions & 2 deletions src/pytorch_lightning/trainer/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,6 @@ def __init__(
gradient_clip_algorithm: Optional[str] = None,
num_nodes: int = 1,
devices: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: Optional[bool] = None, # TODO: Remove in 2.0
enable_progress_bar: bool = True,
overfit_batches: Union[int, float] = 0.0,
track_grad_norm: Union[int, float, str] = -1,
Expand Down Expand Up @@ -363,7 +362,6 @@ def __init__(
benchmark=benchmark,
replace_sampler_ddp=replace_sampler_ddp,
deterministic=deterministic,
auto_select_gpus=auto_select_gpus,
precision=precision,
plugins=plugins,
)
Expand Down
96 changes: 0 additions & 96 deletions src/pytorch_lightning/tuner/auto_gpu_select.py

This file was deleted.

1 change: 0 additions & 1 deletion tests/tests_pytorch/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,6 @@ def reset_deterministic_algorithm():
def mock_cuda_count(monkeypatch, n: int) -> None:
monkeypatch.setattr(lightning_fabric.accelerators.cuda, "num_cuda_devices", lambda: n)
monkeypatch.setattr(pytorch_lightning.accelerators.cuda, "num_cuda_devices", lambda: n)
monkeypatch.setattr(pytorch_lightning.tuner.auto_gpu_select, "num_cuda_devices", lambda: n)


@pytest.fixture(scope="function")
Expand Down
1 change: 0 additions & 1 deletion tests/tests_pytorch/deprecated_api/test_remove_1-10.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@
from pytorch_lightning.strategies.bagua import LightningBaguaModule
from pytorch_lightning.strategies.utils import on_colab_kaggle
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus, pick_single_gpu
from pytorch_lightning.utilities.apply_func import (
apply_to_collection,
apply_to_collections,
Expand Down
59 changes: 0 additions & 59 deletions tests/tests_pytorch/trainer/properties/test_auto_gpu_select.py

This file was deleted.

9 changes: 0 additions & 9 deletions tests/tests_pytorch/trainer/test_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1168,15 +1168,6 @@ def test_invalid_gradient_clip_algo(tmpdir):
Trainer(default_root_dir=tmpdir, gradient_clip_algorithm="norm2")


@RunIf(min_cuda_gpus=1)
def test_invalid_gpu_choice_with_auto_select_gpus():
num_gpus = torch.cuda.device_count()
with pytest.raises(MisconfigurationException, match=r".*but your machine only has.*"), pytest.deprecated_call(
match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"
):
Trainer(accelerator="gpu", devices=num_gpus + 1, auto_select_gpus=True)


@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.5, 5])
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
"""Test that the number of sanity check batches is clipped to `limit_val_batches`."""
Expand Down

0 comments on commit 1e00e54

Please sign in to comment.