3350 drops pytorch 1.5.x support (#3353)
* drop pytorch 1.5.x

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update premerge gpu

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update based on comments

Signed-off-by: Wenqi Li <wenqil@nvidia.com>
wyli authored Nov 18, 2021
1 parent 33b6d61 commit 2e83cd2
Showing 16 changed files with 36 additions and 53 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/cron.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: [self-hosted, linux, x64, common]
strategy:
matrix:
- pytorch-version: [1.5.1, 1.6.0, 1.7.1, 1.8.1, latest]
+ pytorch-version: [1.6.0, 1.7.1, 1.8.1, 1.9.1, latest]
steps:
- uses: actions/checkout@v2
- name: Install the dependencies
@@ -25,14 +25,14 @@ jobs:
python -m pip uninstall -y torch torchvision
if [ ${{ matrix.pytorch-version }} == "latest" ]; then
python -m pip install torch torchvision
- elif [ ${{ matrix.pytorch-version }} == "1.5.1" ]; then
- python -m pip install torch==1.5.1 torchvision==0.6.1
elif [ ${{ matrix.pytorch-version }} == "1.6.0" ]; then
python -m pip install torch==1.6.0 torchvision==0.7.0
elif [ ${{ matrix.pytorch-version }} == "1.7.1" ]; then
python -m pip install torch==1.7.1 torchvision==0.8.2
elif [ ${{ matrix.pytorch-version }} == "1.8.1" ]; then
python -m pip install torch==1.8.1 torchvision==0.9.1
+ elif [ ${{ matrix.pytorch-version }} == "1.9.1" ]; then
+ python -m pip install torch==1.9.1 torchvision==0.10.1
fi
python -m pip install -r requirements-dev.txt
python -m pip list
10 changes: 5 additions & 5 deletions .github/workflows/pythonapp-gpu.yml
@@ -19,17 +19,13 @@ jobs:
strategy:
matrix:
environment:
- - "PT16+CUDA110"
- "PT17+CUDA102"
- "PT17+CUDA110"
- "PT18+CUDA102"
+ - "PT18+CUDA110"
- "PT19+CUDA114"
- "PT110+CUDA102"
include:
- - environment: PT16+CUDA110
- # we explicitly set pytorch to -h to avoid pip install error
- pytorch: "-h"
- base: "nvcr.io/nvidia/pytorch:20.07-py3"
- environment: PT17+CUDA102
pytorch: "torch==1.7.1 torchvision==0.8.2"
base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04"
@@ -40,6 +36,10 @@ jobs:
- environment: PT18+CUDA102
pytorch: "torch==1.8.1 torchvision==0.9.1"
base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04"
+ - environment: PT18+CUDA110
+ # we explicitly set pytorch to -h to avoid pip install error
+ pytorch: "-h"
+ base: "nvcr.io/nvidia/pytorch:21.02-py3"
- environment: PT19+CUDA114
# we explicitly set pytorch to -h to avoid pip install error
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
6 changes: 3 additions & 3 deletions .github/workflows/pythonapp-min.yml
@@ -119,7 +119,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- pytorch-version: [1.5.1, 1.6.0, 1.7.1, 1.8.1, latest]
+ pytorch-version: [1.6.0, 1.7.1, 1.8.1, 1.9.1, latest]
timeout-minutes: 40
steps:
- uses: actions/checkout@v2
@@ -148,14 +148,14 @@ jobs:
# min. requirements
if [ ${{ matrix.pytorch-version }} == "latest" ]; then
python -m pip install torch
- elif [ ${{ matrix.pytorch-version }} == "1.5.1" ]; then
- python -m pip install torch==1.5.1
elif [ ${{ matrix.pytorch-version }} == "1.6.0" ]; then
python -m pip install torch==1.6.0
elif [ ${{ matrix.pytorch-version }} == "1.7.1" ]; then
python -m pip install torch==1.7.1
elif [ ${{ matrix.pytorch-version }} == "1.8.1" ]; then
python -m pip install torch==1.8.1
+ elif [ ${{ matrix.pytorch-version }} == "1.9.1" ]; then
+ python -m pip install torch==1.9.1
fi
python -m pip install -r requirements-min.txt
python -m pip list
2 changes: 1 addition & 1 deletion .github/workflows/pythonapp.yml
@@ -137,7 +137,7 @@ jobs:
# install the latest pytorch for testing
# however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated
# fresh torch installation according to pyproject.toml
- python -m pip install torch>=1.5 torchvision
+ python -m pip install torch>=1.6 torchvision
- name: Check packages
run: |
pip uninstall monai
2 changes: 1 addition & 1 deletion docs/requirements.txt
@@ -1,5 +1,5 @@
-f https://download.pytorch.org/whl/cpu/torch-1.6.0%2Bcpu-cp37-cp37m-linux_x86_64.whl
- torch>=1.5
+ torch>=1.6
pytorch-ignite==0.4.6
numpy>=1.17
itk>=5.2
3 changes: 1 addition & 2 deletions monai/networks/layers/simplelayers.py
@@ -215,8 +215,7 @@ def separable_filtering(x: torch.Tensor, kernels: List[torch.Tensor], mode: str
could be a single kernel (duplicated for all spatial dimensions), or
a list of `spatial_dims` number of kernels.
mode (string, optional): padding mode passed to convolution class. ``'zeros'``, ``'reflect'``, ``'replicate'``
- or ``'circular'``. Default: ``'zeros'``. Modes other than ``'zeros'`` require PyTorch version >= 1.5.1. See
- torch.nn.Conv1d() for more information.
+ or ``'circular'``. Default: ``'zeros'``. See ``torch.nn.Conv1d()`` for more information.
Raises:
TypeError: When ``x`` is not a ``torch.Tensor``.
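For context, a minimal usage sketch of the documented `separable_filtering` parameters; the input shape and kernel values below are illustrative assumptions, not part of this commit:

```python
# Hypothetical example (not from the commit): calling separable_filtering with a
# non-default padding mode, now supported on every PyTorch version MONAI targets.
import torch

from monai.networks.layers.simplelayers import separable_filtering

x = torch.rand(1, 1, 32, 32)            # batch, channel, H, W
kernel = torch.tensor([1.0, 2.0, 1.0])  # small 1-D smoothing kernel, reused per spatial dim
y = separable_filtering(x, [kernel, kernel], mode="replicate")
print(y.shape)  # torch.Size([1, 1, 32, 32])
```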
8 changes: 1 addition & 7 deletions monai/transforms/utils_pytorch_numpy_unification.py
@@ -16,7 +16,6 @@

from monai.config.type_definitions import NdarrayOrTensor
from monai.utils.misc import is_module_ver_at_least
- from monai.utils.type_conversion import convert_to_dst_type

__all__ = [
"moveaxis",
@@ -297,12 +296,7 @@ def searchsorted(a: NdarrayOrTensor, v: NdarrayOrTensor, right=False, sorter=Non
side = "right" if right else "left"
if isinstance(a, np.ndarray):
return np.searchsorted(a, v, side, sorter) # type: ignore
- if hasattr(torch, "searchsorted"):
- return torch.searchsorted(a, v, right=right) # type: ignore
- # if using old PyTorch, will convert to numpy array then compute
- ret = np.searchsorted(a.cpu().numpy(), v.cpu().numpy(), side, sorter) # type: ignore
- ret, *_ = convert_to_dst_type(ret, a)
- return ret
+ return torch.searchsorted(a, v, right=right) # type: ignore


def repeat(a: NdarrayOrTensor, repeats: int, axis: Optional[int] = None):
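With the legacy fallback gone, `searchsorted` simply dispatches on input type. A small illustrative sketch (the array values are assumptions, not from the source):

```python
import numpy as np
import torch

from monai.transforms.utils_pytorch_numpy_unification import searchsorted

sorted_np = np.array([1, 3, 5, 7])
sorted_pt = torch.tensor([1, 3, 5, 7])

# NumPy inputs go through np.searchsorted; tensors go straight to torch.searchsorted
print(searchsorted(sorted_np, np.array([4])))      # [2]
print(searchsorted(sorted_pt, torch.tensor([4])))  # tensor([2])
```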
8 changes: 4 additions & 4 deletions monai/visualize/class_activation_maps.py
@@ -19,7 +19,7 @@

from monai.config import NdarrayTensor
from monai.transforms import ScaleIntensity
- from monai.utils import ensure_tuple, get_torch_version_tuple
+ from monai.utils import ensure_tuple, pytorch_after
from monai.visualize.visualizer import default_upsampler

__all__ = ["CAM", "GradCAM", "GradCAMpp", "ModelWithHooks", "default_normalizer"]
@@ -80,13 +80,13 @@ def __init__(
continue
_registered.append(name)
if self.register_backward:
- if get_torch_version_tuple() < (1, 8):
- mod.register_backward_hook(self.backward_hook(name))
- else:
+ if pytorch_after(1, 8):
if "inplace" in mod.__dict__ and mod.__dict__["inplace"]:
# inplace=True causes errors for register_full_backward_hook
mod.__dict__["inplace"] = False
mod.register_full_backward_hook(self.backward_hook(name))
+ else:
+ mod.register_backward_hook(self.backward_hook(name))
if self.register_forward:
mod.register_forward_hook(self.forward_hook(name))
if len(_registered) != len(self.target_layers):
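The version gate above, distilled into a standalone sketch; the helper name `attach_backward_hook` and the example module are illustrative assumptions, not MONAI API:

```python
import torch
from monai.utils import pytorch_after


def attach_backward_hook(mod: torch.nn.Module, hook):
    """Register hook via the full-backward API on PyTorch >= 1.8, else the legacy API."""
    if pytorch_after(1, 8):
        if getattr(mod, "inplace", False):
            # inplace=True causes errors for register_full_backward_hook
            mod.inplace = False
        return mod.register_full_backward_hook(hook)
    return mod.register_backward_hook(hook)


# usage sketch: a no-op hook on an inplace ReLU
handle = attach_backward_hook(torch.nn.ReLU(inplace=True), lambda m, g_in, g_out: None)
```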
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -2,7 +2,7 @@
requires = [
"wheel",
"setuptools",
- "torch>=1.5",
+ "torch>=1.6",
"ninja",
]

2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,2 +1,2 @@
- torch>=1.5
+ torch>=1.6
numpy>=1.17
2 changes: 1 addition & 1 deletion setup.cfg
@@ -24,7 +24,7 @@ setup_requires =
torch
ninja
install_requires =
- torch>=1.5
+ torch>=1.6
numpy>=1.17

[options.extras_require]
4 changes: 2 additions & 2 deletions tests/test_cachedataset.py
@@ -20,7 +20,7 @@

from monai.data import CacheDataset, DataLoader, PersistentDataset, SmartCacheDataset
from monai.transforms import Compose, Lambda, LoadImaged, RandLambda, ThreadUnsafe, Transform
- from monai.utils import get_torch_version_tuple
+ from monai.utils.module import pytorch_after

TEST_CASE_1 = [Compose([LoadImaged(keys=["image", "label", "extra"])]), (128, 128, 128)]

@@ -134,7 +134,7 @@ class TestCacheThread(unittest.TestCase):
@parameterized.expand(TEST_DS)
def test_thread_safe(self, persistent_workers, cache_workers, loader_workers):
expected = [102, 202, 302, 402, 502, 602, 702, 802, 902, 1002]
- _kwg = {"persistent_workers": persistent_workers} if get_torch_version_tuple() > (1, 7) else {}
+ _kwg = {"persistent_workers": persistent_workers} if pytorch_after(1, 8) else {}
data_list = list(range(1, 11))
dataset = CacheDataset(
data=data_list, transform=_StatefulTransform(), cache_rate=1.0, num_workers=cache_workers, progress=False
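The test's pattern of passing `persistent_workers` only on newer PyTorch, as a minimal sketch (the toy dataset is an assumption, not from the source):

```python
import torch
from monai.utils.module import pytorch_after

data = list(range(10))  # stand-in for a map-style dataset
_kwg = {"persistent_workers": True} if pytorch_after(1, 8) else {}
loader = torch.utils.data.DataLoader(data, batch_size=2, num_workers=2, **_kwg)
```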
5 changes: 2 additions & 3 deletions tests/test_integration_workflows.py
@@ -346,16 +346,15 @@ def _test_saved_files(postfix):

def test_training(self):
repeated = []
- test_rounds = 3 if monai.utils.module.get_torch_version_tuple() >= (1, 6) else 2
+ test_rounds = 3
for i in range(test_rounds):
results = self.train_and_infer(idx=i)
repeated.append(results)
np.testing.assert_allclose(repeated[0], repeated[1])

@TimedCall(seconds=300, skip_timing=not torch.cuda.is_available(), daemon=False)
def test_timing(self):
- if monai.utils.module.get_torch_version_tuple() >= (1, 6):
- self.train_and_infer(idx=2)
+ self.train_and_infer(idx=2)


if __name__ == "__main__":
3 changes: 1 addition & 2 deletions tests/test_invertd.py
@@ -161,9 +161,8 @@ def test_invert(self):
print("invert diff", reverted.size - n_good)
# 25300: 2 workers (cpu, non-macos)
# 1812: 0 workers (gpu or macos)
- # 1824: torch 1.5.1
# 1821: windows torch 1.10.0
- self.assertTrue((reverted.size - n_good) in (34007, 1812, 1824, 1821), f"diff. {reverted.size - n_good}")
+ self.assertTrue((reverted.size - n_good) in (34007, 1812, 1821), f"diff. {reverted.size - n_good}")

set_determinism(seed=None)

18 changes: 8 additions & 10 deletions tests/test_map_label_value.py
@@ -16,7 +16,6 @@
from parameterized import parameterized

from monai.transforms import MapLabelValue
- from monai.utils import pytorch_after
from tests.utils import TEST_NDARRAYS

TESTS = []
@@ -33,15 +32,14 @@
[{"orig_labels": [1, 2, 3], "target_labels": [0.5, 1.5, 2.5]}, p([3, 1, 1, 2]), p([2.5, 0.5, 0.5, 1.5])],
]
)
- # PyTorch 1.5.1 doesn't support rich dtypes
- if pytorch_after(1, 7):
- TESTS.append(
- [
- {"orig_labels": [1.5, 2.5, 3.5], "target_labels": [0, 1, 2], "dtype": np.int8},
- p([3.5, 1.5, 1.5, 2.5]),
- p([2, 0, 0, 1]),
- ]
- )
+ # note: PyTorch 1.5.1 doesn't support rich dtypes
+ TESTS.append(
+ [
+ {"orig_labels": [1.5, 2.5, 3.5], "target_labels": [0, 1, 2], "dtype": np.int8},
+ p([3.5, 1.5, 1.5, 2.5]),
+ p([2, 0, 0, 1]),
+ ]
+ )
TESTS.extend(
[
[
8 changes: 1 addition & 7 deletions tests/test_mmar_download.py
@@ -22,7 +22,7 @@
from monai.apps import RemoteMMARKeys, download_mmar, get_model_spec, load_from_mmar
from monai.apps.mmars import MODEL_DESC
from monai.apps.mmars.mmars import _get_val
- from tests.utils import SkipIfAtLeastPyTorchVersion, SkipIfBeforePyTorchVersion, skip_if_quick
+ from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick

TEST_CASES = [["clara_pt_prostate_mri_segmentation_1"], ["clara_pt_covid19_ct_lesion_segmentation_1"]]
TEST_EXTRACT_CASES = [
@@ -125,7 +125,6 @@ def test_download(self, idx):

@parameterized.expand(TEST_EXTRACT_CASES)
@skip_if_quick
- @SkipIfBeforePyTorchVersion((1, 6))
def test_load_ckpt(self, input_args, expected_name, expected_val):
try:
output = load_from_mmar(**input_args)
@@ -143,11 +142,6 @@ def test_unique(self):
keys = sorted(m["id"] for m in MODEL_DESC)
self.assertTrue(keys == sorted(set(keys)))

- @SkipIfAtLeastPyTorchVersion((1, 6))
- def test_no_default(self):
- with self.assertRaises(ValueError):
- download_mmar(0)

def test_search(self):
self.assertEqual(_get_val({"a": 1, "b": 2}, key="b"), 2)
self.assertEqual(_get_val({"a": {"c": {"c": 4}}, "b": {"c": 2}}, key="b"), {"c": 2})
