3276 - adds an option to skip renorm #3277

Merged: 7 commits, Nov 8, 2021
46 changes: 31 additions & 15 deletions monai/transforms/intensity/array.py
@@ -414,7 +414,7 @@ def __init__(
minv: minimum value of output data.
maxv: maximum value of output data.
factor: factor scale by ``v = v * (1 + factor)``. In order to use
this parameter, please set `minv` and `maxv` into None.
this parameter, please set both `minv` and `maxv` into None.
channel_wise: if True, scale on each channel separately. Please ensure
that the first dimension represents the channel of the image if True.
dtype: output data type, if None, same as input image. defaults to float32.
@@ -433,7 +433,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
ValueError: When ``self.minv=None`` or ``self.maxv=None`` and ``self.factor=None``. Incompatible values.

"""
if self.minv is not None and self.maxv is not None:
if self.minv is not None or self.maxv is not None:
if self.channel_wise:
out = [rescale_array(d, self.minv, self.maxv, dtype=self.dtype) for d in img]
ret = torch.stack(out) if isinstance(img, torch.Tensor) else np.stack(out) # type: ignore
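
An illustrative usage sketch (not part of the diff) of how `ScaleIntensity` behaves with the relaxed bounds; values assume this patch and the updated `rescale_array` below are applied:

```python
import numpy as np
from monai.transforms import ScaleIntensity

img = np.array([[0.0, 2.0, 4.0]], dtype=np.float32)

# both bounds given: rescale to [minv, maxv] as before
print(ScaleIntensity(minv=1.0, maxv=2.0)(img))                # [[1.0, 1.5, 2.0]]

# one bound is None: only (img - min) / (max - min) is applied
print(ScaleIntensity(minv=None, maxv=1.0)(img))               # [[0.0, 0.5, 1.0]]

# both bounds None: multiplicative factor scaling is used instead
print(ScaleIntensity(minv=None, maxv=None, factor=0.1)(img))  # [[0.0, 2.2, 4.4]]
```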
@@ -722,6 +722,9 @@ class ScaleIntensityRange(Transform):
Apply specific intensity scaling to the whole numpy array.
Scaling from [a_min, a_max] to [b_min, b_max] with clip option.

When `b_min` or `b_max` is `None`, `scaled_array * (b_max - b_min) + b_min` will be skipped.
If `clip=True` and `b_min` or `b_max` is None, clipping is not performed on the corresponding edge.

Args:
a_min: intensity original range min.
a_max: intensity original range max.
@@ -734,7 +737,13 @@ class ScaleIntensityRange(Transform):
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

def __init__(
self, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False, dtype: DtypeLike = np.float32
self,
a_min: float,
a_max: float,
b_min: Optional[float] = None,
b_max: Optional[float] = None,
clip: bool = False,
dtype: DtypeLike = np.float32,
) -> None:
self.a_min = a_min
self.a_max = a_max
@@ -750,10 +759,13 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
dtype = self.dtype or img.dtype
if self.a_max - self.a_min == 0.0:
warn("Divide by zero (a_min == a_max)", Warning)
if self.b_min is None:
return img - self.a_min
return img - self.a_min + self.b_min

img = (img - self.a_min) / (self.a_max - self.a_min)
img = img * (self.b_max - self.b_min) + self.b_min
if (self.b_min is not None) and (self.b_max is not None):
img = img * (self.b_max - self.b_min) + self.b_min
if self.clip:
img = clip(img, self.b_min, self.b_max)
ret, *_ = convert_data_type(img, dtype=dtype)
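
A minimal sketch (not part of the diff) of the new `ScaleIntensityRange` behavior when output bounds are omitted; values assume the patched transform:

```python
import numpy as np
from monai.transforms import ScaleIntensityRange

img = np.array([20.0, 42.0, 64.0, 86.0, 108.0])

# as before: scale [20, 108] -> [50, 80]
print(ScaleIntensityRange(a_min=20, a_max=108, b_min=50, b_max=80)(img))
# -> [50.0, 57.5, 65.0, 72.5, 80.0]

# b_min=b_max=None: the renormalization step is skipped, leaving (img - a_min) / (a_max - a_min)
print(ScaleIntensityRange(a_min=20, a_max=108, b_min=None, b_max=None)(img))
# -> [0.0, 0.25, 0.5, 0.75, 1.0]

# clip=True with b_min=None clips on the upper edge only
print(ScaleIntensityRange(a_min=20, a_max=108, b_min=None, b_max=0.5, clip=True)(img))
# -> [0.0, 0.25, 0.5, 0.5, 0.5]
```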
@@ -844,11 +856,12 @@ class ScaleIntensityRangePercentiles(Transform):
"""
Apply range scaling to a numpy array based on the intensity distribution of the input.

By default this transform will scale from [lower_intensity_percentile, upper_intensity_percentile] to [b_min, b_max], where
{lower,upper}_intensity_percentile are the intensity values at the corresponding percentiles of ``img``.
By default this transform will scale from [lower_intensity_percentile, upper_intensity_percentile] to
`[b_min, b_max]`, where {lower,upper}_intensity_percentile are the intensity values at the corresponding
percentiles of ``img``.

The ``relative`` parameter can also be set to scale from [lower_intensity_percentile, upper_intensity_percentile] to the
lower and upper percentiles of the output range [b_min, b_max]
The ``relative`` parameter can also be set to scale from [lower_intensity_percentile, upper_intensity_percentile]
to the lower and upper percentiles of the output range [b_min, b_max].

For example:

@@ -885,6 +898,9 @@ class ScaleIntensityRangePercentiles(Transform):
[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.]]]

See Also:

- :py:class:`monai.transforms.ScaleIntensityRange`

Args:
lower: lower intensity percentile.
@@ -902,8 +918,8 @@ def __init__(
self,
lower: float,
upper: float,
b_min: float,
b_max: float,
b_min: Optional[float],
b_max: Optional[float],
clip: bool = False,
relative: bool = False,
dtype: DtypeLike = np.float32,
@@ -930,15 +946,15 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
b_max = self.b_max

if self.relative:
if (self.b_min is None) or (self.b_max is None):
raise ValueError("If it is relative, b_min and b_max should not be None.")
b_min = ((self.b_max - self.b_min) * (self.lower / 100.0)) + self.b_min
b_max = ((self.b_max - self.b_min) * (self.upper / 100.0)) + self.b_min

scalar = ScaleIntensityRange(a_min=a_min, a_max=a_max, b_min=b_min, b_max=b_max, clip=False, dtype=self.dtype)
scalar = ScaleIntensityRange(
a_min=a_min, a_max=a_max, b_min=b_min, b_max=b_max, clip=self.clip, dtype=self.dtype
)
img = scalar(img)

if self.clip:
img = clip(img, self.b_min, self.b_max)

return img
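
A hedged usage sketch (not part of the diff) of `ScaleIntensityRangePercentiles` after this change, reusing the values from the docstring example above; the effective output range for `relative=True` comes from the lines added in `__call__`:

```python
import numpy as np
from monai.transforms import ScaleIntensityRangePercentiles

img = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0]] * 3])  # channel-first, as in the docstring example

# relative=True maps [10th, 90th intensity percentile] onto the [10%, 90%] sub-range of [0, 200],
# i.e. an effective output range of [20, 180]; clipping is now delegated to ScaleIntensityRange
scaler = ScaleIntensityRangePercentiles(lower=10, upper=90, b_min=0, b_max=200, relative=True)
print(scaler(img))  # rows of [20., 60., 100., 140., 180.]

# relative=True with b_min or b_max set to None now raises at call time
bad = ScaleIntensityRangePercentiles(lower=10, upper=90, b_min=None, b_max=200, relative=True)
# bad(img)  # -> ValueError: If it is relative, b_min and b_max should not be None.
```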


10 changes: 5 additions & 5 deletions monai/transforms/intensity/dictionary.py
@@ -506,7 +506,7 @@ def __init__(
minv: minimum value of output data.
maxv: maximum value of output data.
factor: factor scale by ``v = v * (1 + factor)``. In order to use
this parameter, please set `minv` and `maxv` into None.
this parameter, please set both `minv` and `maxv` into None.
channel_wise: if True, scale on each channel separately. Please ensure
that the first dimension represents the channel of the image if True.
dtype: output data type, if None, same as input image. defaults to float32.
@@ -723,8 +723,8 @@ def __init__(
keys: KeysCollection,
a_min: float,
a_max: float,
b_min: float,
b_max: float,
b_min: Optional[float] = None,
b_max: Optional[float] = None,
clip: bool = False,
dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
@@ -839,8 +839,8 @@ def __init__(
keys: KeysCollection,
lower: float,
upper: float,
b_min: float,
b_max: float,
b_min: Optional[float],
b_max: Optional[float],
clip: bool = False,
relative: bool = False,
dtype: DtypeLike = np.float32,
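A short usage sketch (not in the diff) of the dictionary wrapper with the new optional output bounds, mirroring the array-version example above:

```python
import numpy as np
from monai.transforms import ScaleIntensityRanged

data = {"img": np.array([20.0, 42.0, 64.0, 86.0, 108.0])}

# b_min/b_max omitted: data["img"] is only normalized by (img - a_min) / (a_max - a_min)
t = ScaleIntensityRanged(keys="img", a_min=20, a_max=108)
print(t(data)["img"])  # [0.0, 0.25, 0.5, 0.75, 1.0]
```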
11 changes: 7 additions & 4 deletions monai/transforms/utils.py
@@ -150,12 +150,13 @@ def zero_margins(img: np.ndarray, margin: int) -> bool:

def rescale_array(
arr: NdarrayOrTensor,
minv: float = 0.0,
maxv: float = 1.0,
minv: Optional[float] = 0.0,
maxv: Optional[float] = 1.0,
dtype: Optional[Union[DtypeLike, torch.dtype]] = np.float32,
) -> NdarrayOrTensor:
"""
Rescale the values of numpy array `arr` to be from `minv` to `maxv`.
If either `minv` or `maxv` is None, it returns `(a - min_a) / (max_a - min_a)`.

Args:
arr: input array to rescale.
@@ -170,14 +171,16 @@ def rescale_array(
maxa = arr.max()

if mina == maxa:
return arr * minv
return arr * minv if minv is not None else arr

norm = (arr - mina) / (maxa - mina) # normalize the array first
if (minv is None) or (maxv is None):
return norm
return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default


def rescale_instance_array(
arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: DtypeLike = np.float32
arr: np.ndarray, minv: Optional[float] = 0.0, maxv: Optional[float] = 1.0, dtype: DtypeLike = np.float32
) -> np.ndarray:
"""
Rescale each array slice along the first dimension of `arr` independently.
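An illustrative sketch (not part of the diff) of `rescale_array` with open-ended bounds, assuming this patch is applied:

```python
import numpy as np
from monai.transforms.utils import rescale_array

arr = np.array([2.0, 4.0, 6.0])

print(rescale_array(arr, minv=0.0, maxv=1.0))    # [0.0, 0.5, 1.0]
print(rescale_array(arr, minv=10.0, maxv=20.0))  # [10.0, 15.0, 20.0]
print(rescale_array(arr, minv=None, maxv=None))  # [0.0, 0.5, 1.0] -- only (arr - min) / (max - min)
```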
2 changes: 1 addition & 1 deletion monai/transforms/utils_pytorch_numpy_unification.py
@@ -77,7 +77,7 @@ def clip(a: NdarrayOrTensor, a_min, a_max) -> NdarrayOrTensor:
if isinstance(a, np.ndarray):
result = np.clip(a, a_min, a_max)
else:
result = torch.clip(a, a_min, a_max)
result = torch.clamp(a, a_min, a_max)
return result


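For context, a small sketch (not part of the diff) of the unified `clip` helper with a one-sided bound, which is what `ScaleIntensityRange(..., clip=True)` relies on when one of `b_min`/`b_max` is None:

```python
import numpy as np
import torch
from monai.transforms.utils_pytorch_numpy_unification import clip

x_np = np.array([-1.0, 0.5, 2.0])
x_pt = torch.tensor([-1.0, 0.5, 2.0])

print(clip(x_np, None, 1.0))  # np.clip     -> [-1.0, 0.5, 1.0]
print(clip(x_pt, None, 1.0))  # torch.clamp -> tensor([-1.0, 0.5, 1.0])
```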
7 changes: 4 additions & 3 deletions tests/test_cachedataset_persistent_workers.py
@@ -19,15 +19,16 @@
@SkipIfBeforePyTorchVersion((1, 7))
class TestTransformsWCacheDatasetAndPersistentWorkers(unittest.TestCase):
def test_duplicate_transforms(self):
im, _ = create_test_image_2d(128, 128, num_seg_classes=1, channel_dim=0)
data = [{"img": im} for _ in range(2)]
data = [{"img": create_test_image_2d(128, 128, num_seg_classes=1, channel_dim=0)[0]} for _ in range(2)]

# at least 1 deterministic followed by at least 1 random
transform = Compose([Spacingd("img", pixdim=(1, 1)), RandAffined("img", prob=1.0)])

# cachedataset and data loader w persistent_workers
train_ds = CacheDataset(data, transform, cache_num=1)
train_loader = DataLoader(train_ds, num_workers=2, persistent_workers=True)
# num_workers > 1 may fail randomly with 21.09 on A100 test node
# https://github.com/Project-MONAI/MONAI/issues/3283
train_loader = DataLoader(train_ds, num_workers=1, persistent_workers=True)

b1 = next(iter(train_loader))
b2 = next(iter(train_loader))
8 changes: 8 additions & 0 deletions tests/test_scale_intensity.py
@@ -14,6 +14,7 @@
import numpy as np

from monai.transforms import ScaleIntensity
from monai.transforms.utils import rescale_array
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose


@@ -35,6 +36,13 @@ def test_factor_scale(self):
expected = p((self.imt * (1 + 0.1)).astype(np.float32))
assert_allclose(result, p(expected), rtol=1e-7, atol=0)

def test_max_none(self):
for p in TEST_NDARRAYS:
scaler = ScaleIntensity(minv=0.0, maxv=None, factor=0.1)
result = scaler(p(self.imt))
expected = rescale_array(p(self.imt), minv=0.0, maxv=None)
assert_allclose(result, expected, rtol=1e-3, atol=1e-3)

def test_int(self):
"""integers should be handled by converting them to floats first."""
for p in TEST_NDARRAYS:
8 changes: 8 additions & 0 deletions tests/test_scale_intensity_range.py
@@ -26,6 +26,14 @@ def test_image_scale_intensity_range(self):
expected = (((self.imt - 20) / 88) * 30 + 50).astype(np.uint8)
assert_allclose(scaled, p(expected))

def test_image_scale_intensity_range_none_clip(self):
scaler = ScaleIntensityRange(a_min=20, a_max=108, b_min=None, b_max=80, clip=True, dtype=np.uint8)
for p in TEST_NDARRAYS:
scaled = scaler(p(self.imt))
self.assertTrue(scaled.dtype, np.uint8)
expected = (np.clip((self.imt - 20) / 88, None, 80)).astype(np.uint8)
assert_allclose(scaled, p(expected))


if __name__ == "__main__":
unittest.main()
7 changes: 7 additions & 0 deletions tests/test_scale_intensity_range_percentiles.py
@@ -52,6 +52,13 @@ def test_relative_scaling(self):
result = scaler(p(img))
assert_allclose(result, p(expected_img), rtol=1e-4)

scaler = ScaleIntensityRangePercentiles(
lower=lower, upper=upper, b_min=b_min, b_max=b_max, relative=True, clip=True
)
for p in TEST_NDARRAYS:
result = scaler(p(img))
assert_allclose(result, p(np.clip(expected_img, expected_b_min, expected_b_max)), rtol=1e-4)

def test_invalid_instantiation(self):
self.assertRaises(ValueError, ScaleIntensityRangePercentiles, lower=-10, upper=99, b_min=0, b_max=255)
self.assertRaises(ValueError, ScaleIntensityRangePercentiles, lower=101, upper=99, b_min=0, b_max=255)
18 changes: 11 additions & 7 deletions tests/test_scale_intensity_range_percentilesd.py
@@ -14,14 +14,12 @@
import numpy as np

from monai.transforms.intensity.dictionary import ScaleIntensityRangePercentilesd
from tests.utils import NumpyImageTestCase2D
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose


class TestScaleIntensityRangePercentilesd(NumpyImageTestCase2D):
def test_scaling(self):
img = self.imt
data = {}
data["img"] = img
lower = 10
upper = 99
b_min = 0
@@ -32,9 +30,12 @@ def test_scaling(self):
expected = (img - a_min) / (a_max - a_min)
expected = (expected * (b_max - b_min)) + b_min

scaler = ScaleIntensityRangePercentilesd(keys=data.keys(), lower=lower, upper=upper, b_min=b_min, b_max=b_max)

self.assertTrue(np.allclose(expected, scaler(data)["img"]))
for p in TEST_NDARRAYS:
data = {"img": p(img)}
scaler = ScaleIntensityRangePercentilesd(
keys=data.keys(), lower=lower, upper=upper, b_min=b_min, b_max=b_max
)
assert_allclose(p(expected), scaler(data)["img"])

def test_relative_scaling(self):
img = self.imt
@@ -55,7 +56,7 @@ def test_relative_scaling(self):
expected_img = (img - expected_a_min) / (expected_a_max - expected_a_min)
expected_img = (expected_img * (expected_b_max - expected_b_min)) + expected_b_min

self.assertTrue(np.allclose(expected_img, scaler(data)["img"]))
np.testing.assert_allclose(expected_img, scaler(data)["img"])

def test_invalid_instantiation(self):
self.assertRaises(
@@ -70,6 +71,9 @@ def test_invalid_instantiation(self):
self.assertRaises(
ValueError, ScaleIntensityRangePercentilesd, keys=["img"], lower=30, upper=1000, b_min=0, b_max=255
)
with self.assertRaises(ValueError):
s = ScaleIntensityRangePercentilesd(keys=["img"], lower=30, upper=90, b_min=None, b_max=20, relative=True)
s(self.imt)


if __name__ == "__main__":
8 changes: 8 additions & 0 deletions tests/test_scale_intensity_ranged.py
@@ -25,6 +25,14 @@ def test_image_scale_intensity_ranged(self):
expected = expected * 30 + 50
assert_allclose(scaled[key], p(expected))

def test_image_scale_intensity_ranged_none(self):
key = "img"
scaler = ScaleIntensityRanged(keys=key, a_min=20, a_max=108, b_min=None, b_max=None)
for p in TEST_NDARRAYS:
scaled = scaler({key: p(self.imt)})
expected = (self.imt - 20) / 88
assert_allclose(scaled[key], p(expected))


if __name__ == "__main__":
unittest.main()