Commit 5ba7835

App SDK updated to be compatible with monai 1.0.0 and its MetaTensor (#360)
* Updated to be compatible with monai 1.0.0 and its MetaTensor
* Made bundle inference operator compatible with MetaTensor
* Removed the use of ToTensor from all apps
* Fix MyPy complaints
* Fix isort complaint
* Use a constant for repeated use of monai.utils str literal; SonarCloud code smell
* Updated Seg Writer and Spleen Seg Jupyter notebooks
* Minor to address comments and checker complaint
* Updated clara-viz Jupyter notebook and fixed checking complaint

Signed-off-by: M Q <mingmelvinq@nvidia.com>
1 parent 8e959ab commit 5ba7835

File tree

13 files changed: +2904 -1732 lines changed


examples/apps/ai_livertumor_seg_app/livertumor_seg_operator.py

Lines changed: 1 addition & 3 deletions
@@ -27,14 +27,13 @@
     SaveImaged,
     ScaleIntensityRanged,
     Spacingd,
-    ToTensord,
 )


 @md.input("image", Image, IOType.IN_MEMORY)
 @md.output("seg_image", Image, IOType.IN_MEMORY)
 @md.output("saved_images_folder", DataPath, IOType.DISK)
-@md.env(pip_packages=["monai==0.9.0", "torch>=1.5", "numpy>=1.21", "nibabel"])
+@md.env(pip_packages=["monai>=1.0.0", "torch>=1.5", "numpy>=1.21", "nibabel"])
 class LiverTumorSegOperator(Operator):
     """Performs liver and tumor segmentation using a DL model with an image converted from a DICOM CT series.

@@ -121,7 +120,6 @@ def pre_process(self, img_reader, out_dir: str = "./input_images") -> Compose:
                 Spacingd(keys=my_key, pixdim=(1.0, 1.0, 1.0), mode=("bilinear"), align_corners=True),
                 ScaleIntensityRanged(my_key, a_min=-21, a_max=189, b_min=0.0, b_max=1.0, clip=True),
                 CropForegroundd(my_key, source_key=my_key),
-                ToTensord(my_key),
             ]
         )
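Note: ToTensord is removed here (and from the other apps below) because, with MONAI >= 1.0, the dictionary transforms already operate on and return MetaTensor objects, so no explicit array-to-tensor conversion step is needed. A minimal illustrative sketch, not part of this commit, assuming MONAI >= 1.0 and PyTorch are installed:

import numpy as np
import torch
from monai.data import MetaTensor
from monai.transforms import Compose, EnsureChannelFirstd, ScaleIntensityRanged

# Sketch only: build a fake volume carrying the metadata EnsureChannelFirstd looks for.
arr = np.random.rand(96, 96, 64).astype(np.float32)
img = MetaTensor(torch.as_tensor(arr), meta={"affine": np.eye(4), "original_channel_dim": "no_channel"})

pre = Compose(
    [
        EnsureChannelFirstd(keys="image"),
        ScaleIntensityRanged(keys="image", a_min=-21, a_max=189, b_min=0.0, b_max=1.0, clip=True),
    ]
)
out = pre({"image": img})
print(type(out["image"]).__name__, out["image"].shape)  # MetaTensor, now channel-first, no ToTensord needed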

examples/apps/ai_spleen_seg_app/app.py

Lines changed: 6 additions & 1 deletion
@@ -81,7 +81,12 @@ def compose(self):
                 algorithm_version="0.1.0",
             )
         ]
-        dicom_seg_writer = DICOMSegmentationWriterOperator(segment_descriptions=segment_descriptions)
+
+        custom_tags = {"SeriesDescription": "AI generated Seg, not for clinical use."}
+
+        dicom_seg_writer = DICOMSegmentationWriterOperator(
+            segment_descriptions=segment_descriptions, custom_tags=custom_tags
+        )

         # Create the processing pipeline, by specifying the source and destination operators, and
         # ensuring the output from the former matches the input of the latter, in both name and type.
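The new custom_tags argument takes a dictionary of DICOM keywords mapped to string values, which the writer applies to the generated SEG dataset on a best-effort basis (non-string entries are skipped, failures are only logged). A hedged sketch of the same pattern on a bare pydicom Dataset, not from the commit: apply_custom_tags is a hypothetical helper, and it uses setattr, whereas the operator assigns to existing elements via seg[keyword].value.

import logging

from pydicom.dataset import Dataset


def apply_custom_tags(ds: Dataset, custom_tags: dict) -> None:
    # Only str keyword/value pairs are applied; anything else is skipped or logged.
    for keyword, value in custom_tags.items():
        if isinstance(keyword, str) and isinstance(value, str):
            try:
                setattr(ds, keyword, value)  # pydicom resolves known DICOM keywords to data elements
            except Exception as ex:
                logging.warning(f"Tag {keyword} was not written, due to {ex}")


ds = Dataset()
apply_custom_tags(ds, {"SeriesDescription": "AI generated Seg, not for clinical use."})
print(ds.SeriesDescription)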

examples/apps/ai_unetr_seg_app/unetr_seg_operator.py

Lines changed: 0 additions & 2 deletions
@@ -28,7 +28,6 @@
     SaveImaged,
     ScaleIntensityRanged,
     Spacingd,
-    ToTensord,
 )


@@ -114,7 +113,6 @@ def pre_process(self, img_reader, out_dir: str = "./input_images") -> Compose:
                 Orientationd(keys=my_key, axcodes="RAS"),
                 ScaleIntensityRanged(my_key, a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),
                 CropForegroundd(my_key, source_key=my_key),
-                ToTensord(my_key),
             ]
         )

integrations/nuance_pin/app/inference.py

Lines changed: 0 additions & 4 deletions
@@ -37,7 +37,6 @@
     ScaleIntensityRanged,
     Spacingd,
     ToDeviced,
-    ToTensord,
 )

 sliding_window_inference, _ = optional_import("monai.inferers", name="sliding_window_inference")
@@ -178,9 +177,6 @@ def pre_process(self, img_reader) -> Compose:
                     keys=[image_key, f"{image_key}_meta_dict"],
                     names=[orig_image_key, f"{orig_image_key}_meta_dict"],
                 ),
-                ToTensord(
-                    keys=image_key,
-                ),
                 ToDeviced(keys=image_key, device="cuda"),
                 EnsureChannelFirstd(keys=image_key),
                 Spacingd(keys=image_key, pixdim=(0.703125, 0.703125, 1.25)),

monai/deploy/operators/dicom_seg_writer_operator.py

Lines changed: 52 additions & 24 deletions
@@ -9,10 +9,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
+import datetime
+import logging
 from pathlib import Path
 from random import randint
-from typing import TYPE_CHECKING, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union

 import numpy as np
 from typeguard import typechecked
@@ -166,10 +167,14 @@ class DICOMSegmentationWriterOperator(Operator):
     SUPPORTED_EXTENSIONS = [".nii", ".nii.gz", ".mhd"]
     # DICOM instance file extension. Case insensitive in string comparison.
     DCM_EXTENSION = ".dcm"
-    # Suffix to add to file name to indicate DICOM Seg dcm file.
-    DICOMSEG_SUFFIX = "-DICOMSEG"

-    def __init__(self, segment_descriptions: List[SegmentDescription], *args, **kwargs):
+    def __init__(
+        self,
+        segment_descriptions: List[SegmentDescription],
+        custom_tags: Optional[Dict[str, str]] = None,
+        *args,
+        **kwargs,
+    ):
         super().__init__(*args, **kwargs)
         """Instantiates the DICOM Seg Writer instance with optional list of segment label strings.

@@ -185,11 +190,14 @@ def __init__(self, segment_descriptions: List[SegmentDescription], *args, **kwar
         segment label information, including label value, name, description etc.

         Args:
-            segment_descriptions: Object encapsulating the description of each segment present in the
-                segmentation.
+            segment_descriptions: List[SegmentDescription]
+                Object encapsulating the description of each segment present in the segmentation.
+            custom_tags: Optional[Dict[str, str]], optional
+                Dictionary for setting custom DICOM tags using Keywords and str values only
         """

         self._seg_descs = [sd.to_segment_description(n) for n, sd in enumerate(segment_descriptions, 1)]
+        self._custom_tags = custom_tags

     def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
         """Performs computation for this operator and handles I/O.
@@ -241,25 +249,26 @@ def process_images(
         else:
             raise ValueError("'image' is not an Image object or a supported image file.")

-        # The output DICOM Seg instance file name is based on the actual or made-up input image file name.
-        output_filename = "{0}{1}{2}".format(
-            os.path.splitext(os.path.basename(input_path))[0],
-            DICOMSegmentationWriterOperator.DICOMSEG_SUFFIX,
-            DICOMSegmentationWriterOperator.DCM_EXTENSION,
-        )
-        output_path = output_dir / output_filename
         # Pick DICOM Series that was used as input for getting the seg image.
         # For now, first one in the list.
         for study_selected_series in study_selected_series_list:
             if not isinstance(study_selected_series, StudySelectedSeries):
                 raise ValueError("Element in input is not expected type, 'StudySelectedSeries'.")
             selected_series = study_selected_series.selected_series[0]
             dicom_series = selected_series.series
-            self.create_dicom_seg(seg_image_numpy, dicom_series, output_path)
+            self.create_dicom_seg(seg_image_numpy, dicom_series, output_dir)
             break

-    def create_dicom_seg(self, image: np.ndarray, dicom_series: DICOMSeries, file_path: Path):
-        file_path.parent.absolute().mkdir(parents=True, exist_ok=True)
+    def create_dicom_seg(self, image: np.ndarray, dicom_series: DICOMSeries, output_dir: Path):
+        # Generate SOP instance UID, and use it as dcm file name too
+        seg_sop_instance_uid = hd.UID()  # generate_uid() can be used too.
+
+        if not output_dir.is_dir():
+            try:
+                output_dir.mkdir(parents=True, exist_ok=True)
+            except Exception:
+                raise ValueError("output_dir {output_dir} does not exist and failed to be created.") from None
+        output_path = output_dir / f"{seg_sop_instance_uid}{DICOMSegmentationWriterOperator.DCM_EXTENSION}"

         dicom_dataset_list = [i.get_native_sop_instance() for i in dicom_series.get_sop_instances()]

@@ -275,18 +284,37 @@ def create_dicom_seg(self, image: np.ndarray, dicom_series: DICOMSeries, file_pa
             segment_descriptions=self._seg_descs,
             series_instance_uid=hd.UID(),
             series_number=random_with_n_digits(4),
-            sop_instance_uid=hd.UID(),
+            sop_instance_uid=seg_sop_instance_uid,
             instance_number=1,
             manufacturer="The MONAI Consortium",
             manufacturer_model_name="MONAI Deploy App SDK",
             software_versions=version_str,
             device_serial_number="0000",
         )
-        seg.save_as(file_path)
+
+        # Adding a few tags that are not in the Dataset
+        # Also try to set the custom tags that are of string type
+        dt_now = datetime.datetime.now()
+        seg.SeriesDate = dt_now.strftime("%Y%m%d")
+        seg.SeriesTime = dt_now.strftime("%H%M%S")
+        seg.TimezoneOffsetFromUTC = (
+            dt_now.astimezone().isoformat()[-6:].replace(":", "")
+        )  # '2022-09-27T22:36:20.143857-07:00'
+
+        if self._custom_tags:
+            for k, v in self._custom_tags.items():
+                if isinstance(k, str) and isinstance(v, str):
+                    try:
+                        seg[k].value = v
+                    except Exception as ex:
+                        # Best effort for now.
+                        logging.warning(f"Tag {k} was not written, due to {ex}")
+
+        seg.save_as(output_path)

         try:
             # Test reading back
-            _ = self._read_from_dcm(str(file_path))
+            _ = self._read_from_dcm(str(output_path))
         except Exception as ex:
             print("DICOMSeg creation failed. Error:\n{}".format(ex))
             raise
@@ -356,8 +384,8 @@ def test():
     from monai.deploy.operators.dicom_series_to_volume_operator import DICOMSeriesToVolumeOperator

     current_file_dir = Path(__file__).parent.resolve()
-    data_path = current_file_dir.joinpath("../../../examples/ai_spleen_seg_data/dcm")
-    out_path = current_file_dir.joinpath("../../../examples/output_seg_op/dcm_seg_test.dcm")
+    data_path = current_file_dir.joinpath("../../../inputs/spleen_ct_tcia")
+    out_dir = Path("output_seg_op").absolute()
     segment_descriptions = [
         SegmentDescription(
             segment_label="Spleen",
@@ -385,7 +413,7 @@ def test():
     # Very crude thresholding
     image_numpy = (image.asnumpy() > 400).astype(np.uint8)

-    seg_writer.create_dicom_seg(image_numpy, series, Path(out_path).absolute())
+    seg_writer.create_dicom_seg(image_numpy, series, out_dir)

     # Testing with the main entry functions
     study_list = loader.load_data_to_studies(data_path.absolute())
@@ -394,7 +422,7 @@ def test():
     # Very crude thresholding
     image_numpy = (image.asnumpy() > 400).astype(np.uint8)
     image = Image(image_numpy)
-    seg_writer.process_images(image, study_selected_series_list, out_path.parent.absolute())
+    seg_writer.process_images(image, study_selected_series_list, out_dir)


 if __name__ == "__main__":
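With this change the output file name is no longer derived from the input image file name: the writer generates the SOP Instance UID up front, reuses it as the .dcm file name, and stamps SeriesDate, SeriesTime, and TimezoneOffsetFromUTC from the current local time. A hedged sketch of just that naming and timestamp logic, not from the commit; output_seg_op is an arbitrary example directory.

import datetime
from pathlib import Path

import highdicom as hd

seg_sop_instance_uid = hd.UID()  # pydicom's generate_uid() could be used as well
output_dir = Path("output_seg_op")  # example directory for this sketch
output_dir.mkdir(parents=True, exist_ok=True)
output_path = output_dir / f"{seg_sop_instance_uid}.dcm"  # the UID doubles as the file name

dt_now = datetime.datetime.now()
series_date = dt_now.strftime("%Y%m%d")  # e.g. 20220927
series_time = dt_now.strftime("%H%M%S")  # e.g. 223620
# Local UTC offset such as '-0700', sliced from an ISO timestamp like
# '2022-09-27T22:36:20.143857-07:00'
tz_offset = dt_now.astimezone().isoformat()[-6:].replace(":", "")
print(output_path.name, series_date, series_time, tz_offset)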

monai/deploy/operators/monai_bundle_inference_operator.py

Lines changed: 33 additions & 16 deletions
@@ -15,6 +15,7 @@
 import pickle
 import time
 import zipfile
+from copy import deepcopy
 from pathlib import Path
 from threading import Lock
 from typing import Any, Dict, List, Optional, Tuple, Type, Union
@@ -29,12 +30,19 @@

 from .inference_operator import InferenceOperator

+MONAI_UTILS = "monai.utils"
 nibabel, _ = optional_import("nibabel", "3.2.1")
-torch, _ = optional_import("torch", "1.10.0")
+torch, _ = optional_import("torch", "1.10.2")

+NdarrayOrTensor, _ = optional_import("monai.config", name="NdarrayOrTensor")
+MetaTensor, _ = optional_import("monai.data.meta_tensor", name="MetaTensor")
 PostFix, _ = optional_import("monai.utils.enums", name="PostFix")  # For the default meta_key_postfix
 first, _ = optional_import("monai.utils.misc", name="first")
-ensure_tuple, _ = optional_import("monai.utils", name="ensure_tuple")
+ensure_tuple, _ = optional_import(MONAI_UTILS, name="ensure_tuple")
+convert_to_dst_type, _ = optional_import(MONAI_UTILS, name="convert_to_dst_type")
+Key, _ = optional_import(MONAI_UTILS, name="ImageMetaKey")
+MetaKeys, _ = optional_import(MONAI_UTILS, name="MetaKeys")
+SpaceKeys, _ = optional_import(MONAI_UTILS, name="SpaceKeys")
 Compose_, _ = optional_import("monai.transforms", name="Compose")
 ConfigParser_, _ = optional_import("monai.bundle", name="ConfigParser")
 MapTransform_, _ = optional_import("monai.transforms", name="MapTransform")
@@ -45,6 +53,7 @@
 MapTransform: Any = MapTransform_
 ConfigParser: Any = ConfigParser_

+
 __all__ = ["MonaiBundleInferenceOperator", "IOMapping", "BundleConfigNames"]


@@ -198,7 +207,7 @@ def _ensure_str_list(config_names):
 # operator may choose to pass in a accessible bundle path at development and packaging stage. Ideally,
 # the bundle path should be passed in by the Packager, e.g. via env var, when the App is initialized.
 # As of now, the Packager only passes in the model path after the App including all operators are init'ed.
-@md.env(pip_packages=["monai==0.9.0", "torch>=1.10.02", "numpy>=1.21", "nibabel>=3.2.1"])
+@md.env(pip_packages=["monai>=1.0.0", "torch>=1.10.02", "numpy>=1.21", "nibabel>=3.2.1"])
 class MonaiBundleInferenceOperator(InferenceOperator):
     """This inference operator automates the inference operation for a given MONAI Bundle.

@@ -477,14 +486,19 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe

         start = time.time()
         for name in self._inputs.keys():
-            value, metadata = self._receive_input(name, op_input, context)
+            # Input MetaTensor creation is based on the same logic in monai LoadImage
+            # value: NdarrayOrTensor # MyPy complaints
+            value, meta_data = self._receive_input(name, op_input, context)
+            value = convert_to_dst_type(value, dst=value)[0]
+            if not isinstance(meta_data, dict):
+                raise ValueError("`meta_data` must be a dict.")
+            value = MetaTensor.ensure_torch_and_prune_meta(value, meta_data)
             inputs[name] = value
-            if metadata:
-                inputs[(f"{name}_{self._meta_key_postfix}")] = metadata
+            # Named metadata dict not needed any more, as it is in the MetaTensor

         inputs = self.pre_process(inputs)
-        first_input = inputs.pop(first_input_name)[None].to(self._device)  # select first input
-        input_metadata = inputs.get(f"{first_input_name}_{self._meta_key_postfix}", None)
+        first_input_v = inputs[first_input_name]  # keep a copy of value for later use
+        first_input = inputs.pop(first_input_name)[None].to(self._device)

         # select other tensor inputs
         other_inputs = {k: v[None].to(self._device) for k, v in inputs.items() if isinstance(v, torch.Tensor)}
@@ -496,9 +510,10 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe
         outputs: Any = self.predict(data=first_input, **other_inputs)  # Use type Any to quiet MyPy complaints.
         logging.debug(f"Inference elapsed time (seconds): {time.time() - start}")

-        # TODO: Does this work for models where multiple outputs are returned?
-        # Note that the inputs are needed because the invert transform requires it.
+        # Note that the `inputs` are needed because the `invert` transform requires it. With metadata being
+        # in the keyed MetaTensors of inputs, e.g. `image`, the whole inputs are needed.
         start = time.time()
+        inputs[first_input_name] = first_input_v
         kw_args = {self.kw_preprocessed_inputs: inputs}
         outputs = self.post_process(ensure_tuple(outputs)[0], **kw_args)
         logging.debug(f"Post-processing elapsed time (seconds): {time.time() - start}")
@@ -512,7 +527,7 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe
         for name in self._outputs.keys():
             # Note that the input metadata needs to be passed.
             # Please see the comments in the called function for the reasons.
-            self._send_output(output_dict[name], name, input_metadata, op_output, context)
+            self._send_output(output_dict[name], name, first_input_v.meta, op_output, context)

     def predict(self, data: Any, *args, **kwargs) -> Union[Image, Any, Tuple[Any, ...], Dict[Any, Any]]:
         """Predicts output using the inferer."""
@@ -698,7 +713,7 @@ def _convert_from_image_dicom_source(self, img: Image) -> Tuple[np.ndarray, Dict
         """

         img_meta_dict: Dict = img.metadata()
-        meta_dict = {key: img_meta_dict[key] for key in img_meta_dict.keys()}
+        meta_dict = deepcopy(img_meta_dict)

         # The MONAI ImageReader, e.g. the ITKReader, arranges the image spatial dims in WHD,
         # so the "spacing" needs to be expressed in such an order too, as expected by the transforms.
@@ -709,18 +724,20 @@ def _convert_from_image_dicom_source(self, img: Image) -> Tuple[np.ndarray, Dict
                 img_meta_dict["depth_pixel_spacing"],
             ]
         )
-        meta_dict["original_affine"] = np.asarray(img_meta_dict.get("nifti_affine_transform", None))
-        meta_dict["affine"] = meta_dict["original_affine"]
+        # Use defines MetaKeys directly
+        meta_dict[MetaKeys.ORIGINAL_AFFINE] = np.asarray(img_meta_dict.get("nifti_affine_transform", None))
+        meta_dict[MetaKeys.AFFINE] = meta_dict[MetaKeys.ORIGINAL_AFFINE].copy()
+        meta_dict[MetaKeys.SPACE] = SpaceKeys.LPS  # not using SpaceKeys.RAS or affine_lps_to_ras

         # Similarly the Image ndarray has dim order DHW, to be rearranged to WHD.
         # TODO: Need to revisit this once multi-channel image is supported and the Image class itself
         # is enhanced to provide attributes or functions for channel and dim order details.
         converted_image = np.swapaxes(img.asnumpy(), 0, 2)

         # The spatial shape is then that of the converted image, in WHD
-        meta_dict["spatial_shape"] = np.asarray(converted_image.shape)
+        meta_dict[MetaKeys.SPATIAL_SHAPE] = np.asarray(converted_image.shape)

         # Well, now channel for now.
-        meta_dict["original_channel_dim"] = "no_channel"
+        meta_dict[MetaKeys.ORIGINAL_CHANNEL_DIM] = "no_channel"

         return converted_image, meta_dict
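The compute path now folds each received ndarray and its metadata dict into a single MetaTensor, mirroring MONAI's LoadImage logic, so the separate "<name>_meta_dict" entries are gone and metadata travels with the tensor itself. A hedged sketch of that wrapping step in isolation, not from the commit; the zero volume is a stand-in for Image.asnumpy().

import numpy as np
from monai.data import MetaTensor
from monai.utils import MetaKeys, SpaceKeys, convert_to_dst_type

volume_dhw = np.zeros((64, 96, 96), dtype=np.float32)  # stand-in for Image.asnumpy(), DHW order
value = np.swapaxes(volume_dhw, 0, 2)  # rearrange to WHD, as _convert_from_image_dicom_source does

meta = {
    MetaKeys.ORIGINAL_AFFINE: np.eye(4),
    MetaKeys.AFFINE: np.eye(4),
    MetaKeys.SPACE: SpaceKeys.LPS,
    MetaKeys.SPATIAL_SHAPE: np.asarray(value.shape),
    MetaKeys.ORIGINAL_CHANNEL_DIM: "no_channel",
}

value = convert_to_dst_type(value, dst=value)[0]
img = MetaTensor.ensure_torch_and_prune_meta(value, meta)
print(type(img).__name__, tuple(img.shape))  # one MetaTensor carrying affine, space, and spatial shape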
