Updating to match Numpy 2.0 requirements (#7857)
Fixes #7856. 

### Description

This introduces changes to meet NumPy 2.0 requirements. MONAI itself is
compatible with NumPy 2.0; however, some dependencies, such as older
versions of PyTorch, are not. This PR adjusts the MAX_SEED value to be
compatible with NumPy 2.0 behaviour changes, switches to the `np.ptp`
function, and makes some other minor tweaks. The dependency version
specifiers are also pinned to exclude NumPy 2.0.
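
For reference, a minimal sketch of the NumPy 2.0 API substitutions handled by this PR (names follow the NumPy 2.0 migration guide; the sample array is illustrative only):

```python
import numpy as np

a = np.arange(6, dtype="<u2").reshape(2, 3)

extent = np.ptp(a, axis=1)                              # ndarray.ptp() was removed; np.ptp() remains
swapped = a.byteswap().view(a.dtype.newbyteorder(">"))  # ndarray.newbyteorder() was removed
missing = np.nan                                        # np.NAN / np.NaN aliases were removed
raw = np.array([b"42"], dtype=np.bytes_)                # np.string_ was removed; np.bytes_ remains
# np.compat (including np.compat.long) was removed entirely; the built-in int covers it.
```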

### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not
applicable items -->
- [x] Non-breaking change (fix or new feature that would not break
existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing
functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u
--net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick
--unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/`
folder.

---------

Signed-off-by: Eric Kerfoot <eric.kerfoot@kcl.ac.uk>
Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
ericspod and KumoLiu authored Aug 19, 2024
1 parent 9f56a3a commit 3a6f620
Showing 11 changed files with 15 additions and 15 deletions.
4 changes: 2 additions & 2 deletions environment-dev.yml
@@ -5,10 +5,10 @@ channels:
   - nvidia
   - conda-forge
 dependencies:
-  - numpy>=1.20
+  - numpy>=1.24,<2.0
   - pytorch>=1.9
   - torchvision
-  - pytorch-cuda=11.6
+  - pytorch-cuda>=11.6
   - pip
   - pip:
     - -r requirements-dev.txt
2 changes: 1 addition & 1 deletion monai/data/utils.py
@@ -927,7 +927,7 @@ def compute_shape_offset(
     corners = in_affine_ @ corners
     all_dist = corners_out[:-1].copy()
     corners_out = corners_out[:-1] / corners_out[-1]
-    out_shape = np.round(corners_out.ptp(axis=1)) if scale_extent else np.round(corners_out.ptp(axis=1) + 1.0)
+    out_shape = np.round(np.ptp(corners_out, axis=1)) if scale_extent else np.round(np.ptp(corners_out, axis=1) + 1.0)
     offset = None
     for i in range(corners.shape[1]):
         min_corner = np.min(all_dist - all_dist[:, i : i + 1], 1)
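
`ndarray.ptp()` was removed in NumPy 2.0, while the module-level `np.ptp()` works on both 1.x and 2.x; a standalone sketch of the extent computation above, with made-up corner coordinates:

```python
import numpy as np

# Hypothetical 2D corner coordinates, one corner per column.
corners_out = np.array([[0.0, 0.0, 9.0, 9.0],
                        [0.0, 4.0, 0.0, 4.0]])

# corners_out.ptp(axis=1) raises AttributeError under NumPy 2.0; use the function form instead.
out_shape = np.round(np.ptp(corners_out, axis=1) + 1.0)
print(out_shape)  # [10.  5.]
```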
2 changes: 1 addition & 1 deletion monai/transforms/io/array.py
@@ -86,7 +86,7 @@ def switch_endianness(data, new="<"):
         if new not in ("<", ">"):
             raise NotImplementedError(f"Not implemented option new={new}.")
         if current_ != new:
-            data = data.byteswap().newbyteorder(new)
+            data = data.byteswap().view(data.dtype.newbyteorder(new))
     elif isinstance(data, tuple):
         data = tuple(switch_endianness(x, new) for x in data)
     elif isinstance(data, list):
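
For context, a small sketch (outside MONAI, with an illustrative array) of the same endianness switch: `ndarray.newbyteorder()` is gone in NumPy 2.0, but `dtype.newbyteorder()` remains, so the buffer is byte-swapped and then re-viewed with the swapped dtype:

```python
import numpy as np

data = np.array([1.12, -9.2, 42.0], dtype="<f8")  # little-endian float64

new = ">"
current_ = "<" if data.dtype.byteorder in ("<", "=") else data.dtype.byteorder
if current_ != new:
    # byteswap() flips the bytes in memory; view(...) relabels them with the big-endian
    # dtype, so the logical values are unchanged.
    data = data.byteswap().view(data.dtype.newbyteorder(new))

print(data.dtype, data)  # >f8 [ 1.12 -9.2  42.  ]
```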
2 changes: 1 addition & 1 deletion monai/transforms/spatial/functional.py
@@ -373,7 +373,7 @@ def rotate(img, angle, output_shape, mode, padding_mode, align_corners, dtype, l
     if output_shape is None:
         corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape((len(im_shape), -1))
         corners = transform[:-1, :-1] @ corners  # type: ignore
-        output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
+        output_shape = np.asarray(np.ptp(corners, axis=1) + 0.5, dtype=int)
     else:
         output_shape = np.asarray(output_shape, dtype=int)
     shift = create_translate(input_ndim, ((np.array(im_shape) - 1) / 2).tolist())
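
The same `np.ptp` substitution applies here; as an illustration, a self-contained sketch of deriving a rotated output shape from the corner grid (the 2D shape and angle are arbitrary):

```python
import numpy as np

im_shape = (64, 48)
theta = np.deg2rad(30)
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])

# Every corner of the image bounding box, one corner per column.
corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape((len(im_shape), -1))
corners = rot @ corners

# The peak-to-peak extent of the rotated corners gives the output shape.
output_shape = np.asarray(np.ptp(corners, axis=1) + 0.5, dtype=int)
print(output_shape)  # [79 74] for a 64x48 image rotated by 30 degrees
```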
4 changes: 2 additions & 2 deletions monai/transforms/transform.py
@@ -203,8 +203,8 @@ def set_random_state(self, seed: int | None = None, state: np.random.RandomState
         """
         if seed is not None:
-            _seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed
-            _seed = _seed % MAX_SEED
+            _seed = np.int64(id(seed) if not isinstance(seed, (int, np.integer)) else seed)
+            _seed = _seed % MAX_SEED  # need to account for Numpy2.0 which doesn't silently convert to int64
             self.R = np.random.RandomState(_seed)
             return self

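
A standalone sketch of the seeding pattern above, with `MAX_SEED` stood in by 2**32 (an assumed value for this sketch; the real constant comes from `monai.utils`). The cast to `np.int64` happens before the modulo so that NumPy 2.0, which no longer promotes mixed scalar types silently, never has to widen the id-derived value:

```python
import numpy as np

MAX_SEED = 2**32  # stand-in for MONAI's MAX_SEED constant (assumed for this sketch)

seed = "not-an-integer"  # non-integer seeds are reduced to an int via id(), as in set_random_state
_seed = np.int64(id(seed) if not isinstance(seed, (int, np.integer)) else seed)
_seed = _seed % MAX_SEED  # keeps the seed inside RandomState's valid range [0, 2**32)
rng = np.random.RandomState(_seed)
print(rng.randint(0, 100))
```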
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,2 +1,2 @@
 torch>=1.9
-numpy>=1.20,<=1.26.0
+numpy>=1.24,<2.0
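
With the pin now excluding NumPy 2.0, a throwaway runtime check (not part of the PR) can confirm the resolved version falls in the supported range:

```python
import numpy as np

major, minor = (int(part) for part in np.__version__.split(".")[:2])
assert (1, 24) <= (major, minor) < (2, 0), f"numpy {np.__version__} is outside >=1.24,<2.0"
```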
2 changes: 1 addition & 1 deletion setup.cfg
@@ -42,7 +42,7 @@ setup_requires =
     ninja
 install_requires =
     torch>=1.9
-    numpy>=1.20
+    numpy>=1.24,<2.0
 
 [options.extras_require]
 all =
2 changes: 1 addition & 1 deletion tests/test_meta_tensor.py
@@ -448,7 +448,7 @@ def test_shape(self):
 
     def test_astype(self):
         t = MetaTensor([1.0], affine=torch.tensor(1), meta={"fname": "filename"})
-        for np_types in ("float32", "np.float32", "numpy.float32", np.float32, float, "int", np.compat.long, np.uint16):
+        for np_types in ("float32", "np.float32", "numpy.float32", np.float32, float, "int", np.uint16):
             self.assertIsInstance(t.astype(np_types), np.ndarray)
         for pt_types in ("torch.float", torch.float, "torch.float64"):
             self.assertIsInstance(t.astype(pt_types), torch.Tensor)
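
`np.compat` (and with it `np.compat.long`) was removed in NumPy 2.0; the built-in `int` or `np.int64` expresses the same dtype, e.g.:

```python
import numpy as np

x = np.array([1.5, 2.5, 3.5])

# np.compat.long used to alias the Python int type; both spellings below are NumPy 2.0 safe.
print(x.astype(int).dtype, x.astype(np.int64).dtype)  # typically int64 int64 (plain int maps to the platform default)
```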
2 changes: 1 addition & 1 deletion tests/test_nifti_endianness.py
@@ -82,7 +82,7 @@ def test_switch(self):  # verify data types
         after = switch_endianness(before)
         np.testing.assert_allclose(after.astype(float), expected_float)
 
-        before = np.array(["1.12", "-9.2", "42"], dtype=np.string_)
+        before = np.array(["1.12", "-9.2", "42"], dtype=np.bytes_)
         after = switch_endianness(before)
         np.testing.assert_array_equal(before, after)
 
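
`np.string_` was removed in NumPy 2.0 and `np.bytes_` is the surviving name. Byte-string arrays carry no byte order, which is why the test expects `switch_endianness` to hand them back unchanged; a small illustration with arbitrary values:

```python
import numpy as np

before = np.array([b"1.12", b"-9.2", b"42"], dtype=np.bytes_)
print(before.dtype)            # |S4
print(before.dtype.byteorder)  # '|' means byte order is not applicable, so nothing gets swapped
```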
4 changes: 2 additions & 2 deletions tests/test_signal_fillempty.py
@@ -30,7 +30,7 @@ class TestSignalFillEmptyNumpy(unittest.TestCase):
     def test_correct_parameters_multi_channels(self):
         self.assertIsInstance(SignalFillEmpty(replacement=0.0), SignalFillEmpty)
         sig = np.load(TEST_SIGNAL)
-        sig[:, 123] = np.NAN
+        sig[:, 123] = np.nan
         fillempty = SignalFillEmpty(replacement=0.0)
         fillemptysignal = fillempty(sig)
         self.assertTrue(not np.isnan(fillemptysignal).any())
@@ -42,7 +42,7 @@ class TestSignalFillEmptyTorch(unittest.TestCase):
     def test_correct_parameters_multi_channels(self):
         self.assertIsInstance(SignalFillEmpty(replacement=0.0), SignalFillEmpty)
         sig = convert_to_tensor(np.load(TEST_SIGNAL))
-        sig[:, 123] = convert_to_tensor(np.NAN)
+        sig[:, 123] = convert_to_tensor(np.nan)
         fillempty = SignalFillEmpty(replacement=0.0)
         fillemptysignal = fillempty(sig)
         self.assertTrue(not torch.isnan(fillemptysignal).any())
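
`np.NAN` and `np.NaN` were removed in NumPy 2.0, leaving `np.nan`; the filling behaviour these tests assert can be sketched without MONAI roughly as:

```python
import numpy as np

sig = np.ones((2, 200), dtype=np.float32)
sig[:, 123] = np.nan                  # np.NAN no longer exists under NumPy 2.0

filled = np.nan_to_num(sig, nan=0.0)  # analogous to SignalFillEmpty(replacement=0.0)
assert not np.isnan(filled).any()
```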
4 changes: 2 additions & 2 deletions tests/test_signal_fillemptyd.py
@@ -30,7 +30,7 @@ class TestSignalFillEmptyNumpy(unittest.TestCase):
     def test_correct_parameters_multi_channels(self):
         self.assertIsInstance(SignalFillEmptyd(replacement=0.0), SignalFillEmptyd)
         sig = np.load(TEST_SIGNAL)
-        sig[:, 123] = np.NAN
+        sig[:, 123] = np.nan
         data = {}
         data["signal"] = sig
         fillempty = SignalFillEmptyd(keys=("signal",), replacement=0.0)
@@ -46,7 +46,7 @@ class TestSignalFillEmptyTorch(unittest.TestCase):
     def test_correct_parameters_multi_channels(self):
         self.assertIsInstance(SignalFillEmptyd(replacement=0.0), SignalFillEmptyd)
         sig = convert_to_tensor(np.load(TEST_SIGNAL))
-        sig[:, 123] = convert_to_tensor(np.NAN)
+        sig[:, 123] = convert_to_tensor(np.nan)
         data = {}
         data["signal"] = sig
         fillempty = SignalFillEmptyd(keys=("signal",), replacement=0.0)
