merging 6339/6341 #6343

Merged 3 commits on Apr 12, 2023
monai/transforms/lazy/utils.py (3 changes: 2 additions & 1 deletion)
@@ -158,7 +158,7 @@ def resample(data: torch.Tensor, matrix: NdarrayOrTensor, kwargs: dict | None =
- "lazy_padding_mode"
- "lazy_interpolation_mode" (this option might be ignored when ``mode="auto"``.)
- "lazy_align_corners"
- "lazy_dtype"
- "lazy_dtype" (dtype for resampling computation; this might be ignored when ``mode="auto"``.)
- "atol" for tolerance for matrix floating point comparison.
- "lazy_resample_mode" for resampling backend, default to `"auto"`. Setting to other values will use the
`monai.transforms.SpatialResample` for resampling.
@@ -218,6 +218,7 @@ def resample(data: torch.Tensor, matrix: NdarrayOrTensor, kwargs: dict | None =
img.affine = call_kwargs["dst_affine"]
return img
img = monai.transforms.crop_or_pad_nd(img, matrix_np, out_spatial_size, mode=call_kwargs["padding_mode"])
+   img = img.to(torch.float32) # consistent with monai.transforms.spatial.functional.spatial_resample
img.affine = call_kwargs["dst_affine"]
return img
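
For reference, a minimal usage sketch of resample() with the lazy_* options documented above, modelled on tests/test_resample.py. This sketch is not part of the PR; the uint8 image and identity affine are illustrative values only.

# Illustrative sketch only (not from this diff): calling resample() with the lazy_* options.
import torch
from monai.transforms.lazy.utils import resample
from monai.utils import convert_to_tensor

img = torch.arange(12, dtype=torch.uint8).reshape(1, 3, 4)  # channel-first 2D image (made up)
matrix = torch.eye(3)  # homogeneous identity affine for a 2D image (made up)

out = resample(
    convert_to_tensor(img),
    matrix,
    {
        "lazy_shape": img.shape[1:],    # target spatial shape
        "lazy_padding_mode": "border",
        "lazy_dtype": torch.float,      # may be ignored when lazy_resample_mode is "auto"
        "lazy_resample_mode": "auto",   # other values fall back to monai.transforms.SpatialResample
    },
)
print(out.dtype)  # expected torch.float32 on both the "auto" fast path and the SpatialResample fallback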

requirements-dev.txt (4 changes: 2 additions & 2 deletions)
@@ -19,8 +19,8 @@ mccabe
pep8-naming
pycodestyle
pyflakes
-   black
-   isort
+   black>=22.12
+   isort>=5.1
pytype>=2020.6.1; platform_system != "Windows"
types-pkg_resources
mypy>=0.790
tests/test_bundle_trt_export.py (6 changes: 4 additions & 2 deletions)
@@ -60,7 +60,7 @@ def tearDown(self):
del os.environ["CUDA_VISIBLE_DEVICES"] # previously unset

@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
-   @unittest.skipUnless(has_torchtrt and has_tensorrt, "Torch-TensorRT is required for convert!")
+   @unittest.skipUnless(has_torchtrt and has_tensorrt, "Torch-TensorRT is required for conversion!")
def test_trt_export(self, convert_precision, input_shape, dynamic_batch):
meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json")
config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json")
@@ -95,7 +95,9 @@ def test_trt_export(self, convert_precision, input_shape, dynamic_batch):
self.assertTrue("network_def" in json.loads(extra_files["inference.json"]))

@parameterized.expand([TEST_CASE_3, TEST_CASE_4])
-   @unittest.skipUnless(has_onnx, "Onnx is required for onnx-trt conversion!")
+   @unittest.skipUnless(
+       has_onnx and has_torchtrt and has_tensorrt, "Onnx and TensorRT are required for onnx-trt conversion!"
+   )
def test_onnx_trt_export(self, convert_precision, input_shape, dynamic_batch):
meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json")
config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json")
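For context on the skip guards above, here is a hedged sketch of how availability flags such as has_onnx, has_torchtrt, and has_tensorrt are typically produced in MONAI tests via optional_import. The exact package names probed by tests/test_bundle_trt_export.py are assumptions here, not taken from this diff.

# Assumed setup (not part of this PR): optional-dependency flags via monai.utils.optional_import.
from monai.utils import optional_import

onnx, has_onnx = optional_import("onnx")                          # assumed module name
torch_tensorrt, has_torchtrt = optional_import("torch_tensorrt")  # assumed module name
tensorrt, has_tensorrt = optional_import("tensorrt")              # assumed module name

Each call returns the imported module (or a lazy placeholder) plus a boolean availability flag, which is what the @unittest.skipUnless decorators check.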
tests/test_resample.py (5 changes: 5 additions & 0 deletions)
@@ -37,6 +37,11 @@ def test_resample_function_impl(self, img, matrix, expected):
out = resample(convert_to_tensor(img), matrix, {"lazy_shape": img.shape[1:], "lazy_padding_mode": "border"})
assert_allclose(out[0], expected, type_test=False)

+   img = convert_to_tensor(img, dtype=torch.uint8)
+   out = resample(img, matrix, {"lazy_resample_mode": "auto", "lazy_dtype": torch.float})
+   out_1 = resample(img, matrix, {"lazy_resample_mode": "other value", "lazy_dtype": torch.float})
+   self.assertIs(out.dtype, out_1.dtype) # testing dtype in different lazy_resample_mode


if __name__ == "__main__":
unittest.main()