Skip to content

Commit

Permalink
3595 3766 adds a base writer and an itk writer (#3674)
Browse files Browse the repository at this point in the history
* temp spatial_resample

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes resampling

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes precisions

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update dict version

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes unit tests

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* adds docs

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* copy grid for resampling

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes unit tests

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* remove normalize coordinates

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* [MONAI] python code formatting

Signed-off-by: monai-bot <monai.miccai2019@gmail.com>

* try to fix #3621 (#3673)

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes typing

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes grid_sample, interpolate URLs

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* simplify norm_coords

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update docstring

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update moveaxis

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* spatial sample tests

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* additional tests spatial resample

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* test invert spatial resample

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* adds a base writer and an itk writer

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update docstrings

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* remove return self

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* adds reorient_spatial_axes

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update based on comments

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update based on comments

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* fixes unit tests

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* sync 3701

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* try to fix #3766

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* revise docstring to be concise

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update based on comments

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* 3765 Enhance `create_multigpu_supervised_XXX` for distributed (#3768)

* [DLMED] add check for devices

Signed-off-by: Nic Ma <nma@nvidia.com>

* [DLMED] update according to comments

Signed-off-by: Nic Ma <nma@nvidia.com>

* update to support dynamic spatial_size

Signed-off-by: Wenqi Li <wenqil@nvidia.com>

* update based on comments

Signed-off-by: Wenqi Li <wenqil@nvidia.com>
  • Loading branch information
wyli authored Feb 7, 2022
1 parent fa066fb commit afcf593
Show file tree
Hide file tree
Showing 9 changed files with 606 additions and 45 deletions.
13 changes: 13 additions & 0 deletions docs/source/data.rst
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,19 @@ WSIReader
.. autoclass:: WSIReader
:members:

Image writer
------------

ImageWriter
~~~~~~~~~~~
.. autoclass:: ImageWriter
:members:

ITKWriter
~~~~~~~~~
.. autoclass:: ITKWriter
:members:

Nifti format handling
---------------------

Expand Down
3 changes: 3 additions & 0 deletions monai/data/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
from .grid_dataset import GridPatchDataset, PatchDataset, PatchIter
from .image_dataset import ImageDataset
from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader
from .image_writer import ImageWriter, ITKWriter, logger
from .iterable_dataset import CSVIterableDataset, IterableDataset, ShuffleBuffer
from .nifti_saver import NiftiSaver
from .nifti_writer import write_nifti
Expand All @@ -60,11 +61,13 @@
iter_patch_slices,
json_hashing,
list_data_collate,
orientation_ras_lps,
pad_list_data_collate,
partition_dataset,
partition_dataset_classes,
pickle_hashing,
rectify_header_sform_qform,
reorient_spatial_axes,
resample_datalist,
select_cross_validation_folds,
set_rnd,
Expand Down
395 changes: 395 additions & 0 deletions monai/data/image_writer.py

Large diffs are not rendered by default.

100 changes: 73 additions & 27 deletions monai/data/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,40 +52,46 @@


__all__ = [
"get_random_patch",
"iter_patch_slices",
"dense_patch_slices",
"iter_patch",
"get_valid_patch_size",
"list_data_collate",
"worker_init_fn",
"set_rnd",
"correct_nifti_header_if_necessary",
"rectify_header_sform_qform",
"zoom_affine",
"AFFINE_TOL",
"SUPPORTED_PICKLE_MOD",
"affine_to_spacing",
"compute_importance_map",
"compute_shape_offset",
"to_affine_nd",
"convert_tables_to_dicts",
"correct_nifti_header_if_necessary",
"create_file_basename",
"compute_importance_map",
"decollate_batch",
"dense_patch_slices",
"get_random_patch",
"get_valid_patch_size",
"is_supported_format",
"iter_patch",
"iter_patch_slices",
"json_hashing",
"list_data_collate",
"no_collation",
"orientation_ras_lps",
"pad_list_data_collate",
"partition_dataset",
"partition_dataset_classes",
"pickle_hashing",
"rectify_header_sform_qform",
"reorient_spatial_axes",
"resample_datalist",
"select_cross_validation_folds",
"json_hashing",
"pickle_hashing",
"set_rnd",
"sorted_dict",
"decollate_batch",
"pad_list_data_collate",
"no_collation",
"convert_tables_to_dicts",
"SUPPORTED_PICKLE_MOD",
"reorient_spatial_axes",
"to_affine_nd",
"worker_init_fn",
"zoom_affine",
]

# module to be used by `torch.save`
SUPPORTED_PICKLE_MOD = {"pickle": pickle}

# tolerance for affine matrix computation
AFFINE_TOL = 1e-3


def get_random_patch(
dims: Sequence[int], patch_size: Sequence[int], rand_state: Optional[np.random.RandomState] = None
Expand Down Expand Up @@ -547,6 +553,30 @@ def set_rnd(obj, seed: int) -> int:
return seed


def affine_to_spacing(affine: NdarrayTensor, r: int = 3, dtype=float, suppress_zeros: bool = True) -> NdarrayTensor:
    """
    Compute the voxel spacing encoded in an affine matrix.

    The spacing along each spatial axis is the Euclidean norm of the
    corresponding column of the top-left ``r x r`` submatrix.

    Args:
        affine: a d x d affine matrix.
        r: spatial rank; spacing is computed from ``affine[:r, :r]``.
        dtype: data type of the output.
        suppress_zeros: whether to replace zero spacing values with ones.

    Returns:
        an `r` dimensional vector of spacing.
    """
    directions, *_ = convert_to_dst_type(affine[:r, :r], dst=affine, dtype=dtype)
    if isinstance(directions, torch.Tensor):
        norms = (directions * directions).sum(dim=0).sqrt()
    else:
        norms = np.sqrt((directions * directions).sum(axis=0))
    if suppress_zeros:
        # a zero column would yield a degenerate spacing; substitute unit spacing
        norms[norms == 0] = 1.0
    out, *_ = convert_to_dst_type(norms, dst=affine, dtype=dtype)
    return out


def correct_nifti_header_if_necessary(img_nii):
"""
Check nifti object header's format, update the header if needed.
Expand All @@ -562,7 +592,7 @@ def correct_nifti_header_if_necessary(img_nii):
return img_nii # do nothing for high-dimensional array
# check that affine matches zooms
pixdim = np.asarray(img_nii.header.get_zooms())[:dim]
norm_affine = np.sqrt(np.sum(np.square(img_nii.affine[:dim, :dim]), 0))
norm_affine = affine_to_spacing(img_nii.affine, r=dim)
if np.allclose(pixdim, norm_affine):
return img_nii
if hasattr(img_nii, "get_sform"):
Expand All @@ -583,8 +613,8 @@ def rectify_header_sform_qform(img_nii):
d = img_nii.header["dim"][0]
pixdim = np.asarray(img_nii.header.get_zooms())[:d]
sform, qform = img_nii.get_sform(), img_nii.get_qform()
norm_sform = np.sqrt(np.sum(np.square(sform[:d, :d]), 0))
norm_qform = np.sqrt(np.sum(np.square(qform[:d, :d]), 0))
norm_sform = affine_to_spacing(sform, r=d)
norm_qform = affine_to_spacing(qform, r=d)
sform_mismatch = not np.allclose(norm_sform, pixdim)
qform_mismatch = not np.allclose(norm_qform, pixdim)

Expand All @@ -601,7 +631,7 @@ def rectify_header_sform_qform(img_nii):
img_nii.set_qform(img_nii.get_sform())
return img_nii

norm = np.sqrt(np.sum(np.square(img_nii.affine[:d, :d]), 0))
norm = affine_to_spacing(img_nii.affine, r=d)
warnings.warn(f"Modifying image pixdim from {pixdim} to {norm}")

img_nii.header.set_zooms(norm)
Expand Down Expand Up @@ -641,7 +671,7 @@ def zoom_affine(affine: np.ndarray, scale: Union[np.ndarray, Sequence[float]], d

d = len(affine) - 1
# compute original pixdim
norm = np.sqrt(np.sum(np.square(affine), 0))[:-1]
norm = affine_to_spacing(affine, r=d)
if len(scale_np) < d: # defaults based on affine
scale_np = np.append(scale_np, norm[len(scale_np) :])
scale_np = scale_np[:d]
Expand Down Expand Up @@ -693,7 +723,7 @@ def compute_shape_offset(
k = 0
for i in range(corners.shape[1]):
min_corner = np.min(mat @ corners[:-1, :] - mat @ corners[:-1, i : i + 1], 1)
if np.allclose(min_corner, 0.0, rtol=1e-3):
if np.allclose(min_corner, 0.0, rtol=AFFINE_TOL):
k = i
break
offset = corners[:-1, k]
Expand Down Expand Up @@ -1259,3 +1289,19 @@ def convert_tables_to_dicts(
data = [dict(d, **{k: v[i] for k, v in groups.items()}) for i, d in enumerate(data)]

return data


def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:
    """
    Convert the ``affine`` between the `RAS` and `LPS` orientation
    by flipping the first two spatial dimensions.

    Args:
        affine: a 2D affine matrix.
    """
    sr = max(affine.shape[0] - 1, 1)  # spatial rank is at least 1
    # build a diagonal of size sr + 1: negate the first two spatial axes
    # (only one when sr == 1); the remaining entries, including the
    # homogeneous coordinate, stay 1
    signs = [-1 if i < min(sr, 2) else 1 for i in range(sr + 1)]
    if isinstance(affine, torch.Tensor):
        return torch.diag(torch.as_tensor(signs).to(affine)) @ affine  # type: ignore
    return np.diag(signs).astype(affine.dtype) @ affine  # type: ignore
33 changes: 17 additions & 16 deletions monai/transforms/spatial/array.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

from monai.config import USE_COMPILED, DtypeLike
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine
from monai.data.utils import AFFINE_TOL, compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine
from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
from monai.networks.utils import meshgrid_ij, normalize_transform
from monai.transforms.croppad.array import CenterSpatialCrop, Pad
Expand Down Expand Up @@ -54,8 +54,6 @@
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type

AFFINE_TOL = 1e-3

nib, has_nib = optional_import("nibabel")

__all__ = [
Expand Down Expand Up @@ -132,7 +130,7 @@ def __call__(
img: NdarrayOrTensor,
src_affine: Optional[NdarrayOrTensor] = None,
dst_affine: Optional[NdarrayOrTensor] = None,
spatial_size: Optional[Union[Sequence[int], int]] = None,
spatial_size: Optional[Union[Sequence[int], np.ndarray, int]] = None,
mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
align_corners: Optional[bool] = False,
Expand Down Expand Up @@ -175,18 +173,27 @@ def __call__(
if src_affine is None:
src_affine = np.eye(4, dtype=np.float64)
spatial_rank = min(len(img.shape) - 1, src_affine.shape[0] - 1, 3)
if spatial_size is not -1 and spatial_size is not None:
spatial_rank = min(len(ensure_tuple(spatial_size)), 3) # infer spatial rank based on spatial_size
src_affine = to_affine_nd(spatial_rank, src_affine)
dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine
dst_affine, *_ = convert_to_dst_type(dst_affine, dst_affine, dtype=torch.float32)

if allclose(src_affine, dst_affine, atol=AFFINE_TOL):
in_spatial_size = np.asarray(img.shape[1 : spatial_rank + 1])
if spatial_size is -1: # using the input spatial size
spatial_size = in_spatial_size
elif spatial_size is None: # auto spatial size
spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine) # type: ignore
spatial_size = np.asarray(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))

if allclose(src_affine, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
# no significant change, return original image
output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
return output_data, dst_affine

if has_nib and isinstance(img, np.ndarray):
spatial_ornt, dst_r = reorient_spatial_axes(img.shape[1 : spatial_rank + 1], src_affine, dst_affine)
if allclose(dst_r, dst_affine, atol=AFFINE_TOL):
if allclose(dst_r, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
# simple reorientation achieves the desired affine
spatial_ornt[:, 0] += 1
spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
Expand All @@ -208,18 +215,12 @@ def __call__(
raise ValueError(f"src affine is not invertible: {src_affine}") from e
xform = to_affine_nd(spatial_rank, xform)
# no resampling if it's identity transform
if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL):
if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
return output_data, dst_affine

_dtype = dtype or self.dtype or img.dtype
spatial_size = ensure_tuple(spatial_size)
in_spatial_size = list(img.shape[1 : spatial_rank + 1])
if spatial_size[0] == -1: # if the spatial_size == -1
spatial_size = in_spatial_size
elif spatial_size[0] is None:
spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine) # type: ignore
spatial_size = spatial_size[:spatial_rank]
in_spatial_size = in_spatial_size.tolist()
chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :] # beyond three spatial dims
# resample
img_ = convert_data_type(img, torch.Tensor, dtype=_dtype)[0]
Expand Down Expand Up @@ -270,7 +271,7 @@ class Spacing(Transform):

def __init__(
self,
pixdim: Union[Sequence[float], float],
pixdim: Union[Sequence[float], float, np.ndarray],
diagonal: bool = False,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
Expand Down Expand Up @@ -334,7 +335,7 @@ def __call__(
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
align_corners: Optional[bool] = None,
dtype: DtypeLike = None,
output_spatial_shape: Optional[Union[Sequence[int], int]] = None,
output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,
) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
"""
Args:
Expand Down
3 changes: 2 additions & 1 deletion monai/transforms/spatial/dictionary.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@

from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import affine_to_spacing
from monai.networks.layers import AffineTransform
from monai.networks.layers.simplelayers import GaussianFilter
from monai.transforms.croppad.array import CenterSpatialCrop, SpatialPad
Expand Down Expand Up @@ -435,7 +436,7 @@ def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, Nd
padding_mode = transform[TraceKeys.EXTRA_INFO]["padding_mode"]
align_corners = transform[TraceKeys.EXTRA_INFO]["align_corners"]
orig_size = transform[TraceKeys.ORIG_SIZE]
orig_pixdim = np.sqrt(np.sum(np.square(old_affine), 0))[:-1]
orig_pixdim = affine_to_spacing(old_affine, -1)
inverse_transform = Spacing(orig_pixdim, diagonal=self.spacing_transform.diagonal)
# Apply inverse
d[key], _, new_affine = inverse_transform(
Expand Down
55 changes: 55 additions & 0 deletions tests/test_itk_writer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np
import torch

from monai.data import ITKWriter
from monai.utils import optional_import

itk, has_itk = optional_import("itk")
nib, has_nibabel = optional_import("nibabel")


@unittest.skipUnless(has_itk, "Requires `itk` package.")
class TestITKWriter(unittest.TestCase):
    # Tests for monai.data.ITKWriter: verifies channel-dimension handling and
    # basic RGB PNG output round-tripped through `itk.imread`.

    def test_channel_shape(self):
        # For a (1, 2, 3, 4) input, each axis in turn is declared the channel
        # dim; the written image's size should be the remaining three axes.
        # squeeze_end_dims=False keeps trailing singleton dims in the output.
        with tempfile.TemporaryDirectory() as tempdir:
            for c in (0, 1, 2, 3):
                fname = os.path.join(tempdir, f"testing{c}.nii")
                itk_writer = ITKWriter()
                itk_writer.set_data_array(torch.zeros(1, 2, 3, 4), channel_dim=c, squeeze_end_dims=False)
                itk_writer.set_metadata({})
                itk_writer.write(fname)
                itk_obj = itk.imread(fname)
                s = [1, 2, 3, 4]
                s.pop(c)
                # NOTE(review): assumes `itk.size` reports the spatial size in the
                # same axis order as the non-channel input dims — confirm against
                # ITKWriter's axis reordering.
                np.testing.assert_allclose(itk.size(itk_obj), s)

    def test_rgb(self):
        # Writes a 3-channel (channel-first) 4x4 image, asking the writer to
        # resample to a 5x5 spatial shape, then reads it back as HWC uint8.
        with tempfile.TemporaryDirectory() as tempdir:
            fname = os.path.join(tempdir, "testing.png")
            writer = ITKWriter(output_dtype=np.uint8)
            writer.set_data_array(np.arange(48).reshape(3, 4, 4), channel_dim=0)
            writer.set_metadata({"spatial_shape": (5, 5)})
            writer.write(fname)

            output = np.asarray(itk.imread(fname))
            np.testing.assert_allclose(output.shape, (5, 5, 3))
            # NOTE(review): (5, 5, 4) are the interpolated channel values at
            # pixel (1, 1) after the 4x4 -> 5x5 resampling — these depend on the
            # writer's default interpolation; verify if resampling defaults change.
            np.testing.assert_allclose(output[1, 1], (5, 5, 4))


if __name__ == "__main__":
unittest.main()
Loading

0 comments on commit afcf593

Please sign in to comment.