Skip to content

Commit e9a0965

Browse files
authored
Mqin/fix doc typos (#274)
* Fixed typo in inline comments. Signed-off-by: mmelqin <mingmelvinq@nvidia.com> * Checked and fixed typo for all Operators Signed-off-by: mmelqin <mingmelvinq@nvidia.com> * Added notes in the Jupyter notebooks Signed-off-by: mmelqin <mingmelvinq@nvidia.com>
1 parent 8ec336f commit e9a0965

10 files changed

+44
-23
lines changed

monai/deploy/operators/dicom_seg_writer_operator.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def _read_from_dcm(self, file_path: str):
178178
return dcmread(file_path)
179179

180180
def select_input_file(self, input_folder, extensions=SUPPORTED_EXTENSIONS):
181-
"""Select the inut files based on supported extensions.
181+
"""Select the input files based on supported extensions.
182182
183183
Args:
184184
input_folder (string): the path of the folder containing the input file(s)
@@ -222,7 +222,7 @@ def _image_file_to_numpy(self, input_path: str):
222222
def _get_label_list(self, stringfied_list_of_labels: str = ""):
223223
"""Parse the string to get the label list.
224224
225-
If empty string is provided, a list of a single element is retured.
225+
If empty string is provided, a list of a single element is returned.
226226
227227
Args:
228228
stringfied_list_of_labels (str): string representing the list of segmentation labels.
@@ -299,7 +299,7 @@ def safe_get(ds, key):
299299
"""Safely gets the tag value if present from the Dataset and logs failure.
300300
301301
The safe get method of dict works for str, but not the hex key. The added
302-
benefit of this funtion is that it logs the failure to get the keyed value.
302+
benefit of this function is that it logs the failure to get the keyed value.
303303
304304
Args:
305305
ds (Dataset): pydicom Dataset
@@ -464,7 +464,7 @@ def create_label_segment(label, name):
464464

465465
segments = Sequence()
466466
# Assumes the label starts at 1 and increment sequentially.
467-
# TODO: This part needs to be more deteministic, e.g. with a dict.
467+
# TODO: This part needs to be more deterministic, e.g. with a dict.
468468
for lb, name in enumerate(seg_labels, 1):
469469
segment = create_label_segment(lb, name)
470470
segments.append(segment)

monai/deploy/operators/dicom_series_selector_operator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525

2626
@md.input("dicom_study_list", List[DICOMStudy], IOType.IN_MEMORY)
27-
@md.input("selection_rules", Dict, IOType.IN_MEMORY) # This overides the rules in the instance.
27+
@md.input("selection_rules", Dict, IOType.IN_MEMORY) # This overrides the rules in the instance.
2828
@md.output("study_selected_series_list", List[StudySelectedSeries], IOType.IN_MEMORY)
2929
class DICOMSeriesSelectorOperator(Operator):
3030
"""This operator selects a list of DICOM Series in a DICOM Study for a given set of selection rules.

monai/deploy/operators/dicom_text_sr_writer_operator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ def __init__(
103103
*args,
104104
**kwargs,
105105
):
106-
"""Class to write DICOM SR SOP Instance for AI textual result in memeory or in a file.
106+
"""Class to write DICOM SR SOP Instance for AI textual result in memory or in a file.
107107
108108
Args:
109109
copy_tags (bool): True for copying DICOM attributes from a provided DICOMSeries.

monai/deploy/operators/inference_operator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe
5151

5252
@abstractmethod
5353
def predict(self, data: Any) -> Union[Image, Any]:
54-
"""Prdicts results using the models(s) with input tensors.
54+
"""Predicts results using the models(s) with input tensors.
5555
5656
This method must be overridden by a derived class.
5757

monai/deploy/operators/monai_seg_inference_operator.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2021 MONAI Consortium
1+
# Copyright 2021-2022 MONAI Consortium
22
# Licensed under the Apache License, Version 2.0 (the "License");
33
# you may not use this file except in compliance with the License.
44
# You may obtain a copy of the License at
@@ -70,8 +70,8 @@ def __init__(
7070
7171
Args:
7272
roi_size (Union[Sequence[int], int]): The tensor size used in inference.
73-
pre_transforms (Compose): MONAI Compose oject used for pre-transforms.
74-
post_transforms (Compose): MONAI Compose oject used for post-transforms.
73+
pre_transforms (Compose): MONAI Compose object used for pre-transforms.
74+
post_transforms (Compose): MONAI Compose object used for post-transforms.
7575
overlap (float): The overlap used in sliding window inference.
7676
"""
7777

@@ -132,7 +132,7 @@ def overlap(self, val: float):
132132
def _convert_dicom_metadata_datatype(self, metadata: Dict):
133133
"""Converts metadata in pydicom types to the corresponding native types.
134134
135-
It is knwon that some values of the metadata are of the pydicom types, for images converted
135+
It is known that some values of the metadata are of the pydicom types, for images converted
136136
from DICOM series. Need to use this function to convert the types with best effort and for
137137
the few known metadata attributes, until the following issue is addressed:
138138
https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/185
@@ -264,7 +264,7 @@ def post_process(self, pre_transforms: Compose, out_dir: str = "./infer_out") ->
264264
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
265265

266266
def predict(self, data: Any, *args, **kwargs) -> Union[Image, Any]:
267-
"""Prdicts results using the models(s) with input tensors.
267+
"""Predicts results using the models(s) with input tensors.
268268
269269
This method must be overridden by a derived class.
270270
@@ -310,7 +310,7 @@ def get_data(self, input_image):
310310
A single image is loaded with a single set of metadata as of now.
311311
312312
The App SDK Image asnumpy() function is expected to return a numpy array of index order `DHW`.
313-
This is because in the DICOM serie to volume operator pydicom Dataset pixel_array is used to
313+
This is because in the DICOM series to volume operator pydicom Dataset pixel_array is used
314314
to get per instance pixel numpy array, with index order of `HW`. When all instances are stacked,
315315
along the first axis, the Image numpy array's index order is `DHW`. ITK array_view_from_image
316316
and SimpleITK GetArrayViewFromImage also returns a numpy array with the index order of `DHW`.
@@ -332,7 +332,7 @@ def get_data(self, input_image):
332332
if not isinstance(i, Image):
333333
raise TypeError("Only object of Image type is supported.")
334334

335-
# The Image asnumpy() retruns NumPy array similar to ITK array_view_from_image
335+
# The Image asnumpy() returns NumPy array similar to ITK array_view_from_image
336336
# The array then needs to be transposed, as does in MONAI ITKReader, to align
337337
# with the output from Nibabel reader loading NIfTI files.
338338
data = i.asnumpy().T

monai/deploy/operators/png_converter_operator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
@md.env(pip_packages=["Pillow >= 8.0.0"])
2828
class PNGConverterOperator(Operator):
2929
"""
30-
This operator writes out a 3D Volumtric Image to disk in a slice by slice manner
30+
This operator writes out a 3D Volumetric Image to disk in a slice by slice manner
3131
"""
3232

3333
def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):

monai/deploy/operators/stl_conversion_operator.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def convert(
137137
"""
138138
Args:
139139
image (Image): object with the image (ndarray of DHW index order) and its metadata dictionary.
140-
output_file (str): output stl file path. Default to None for not saving output file.
140+
output_file (str): output STL file path. Default to None for not saving output file.
141141
class_id (array, optional): Class label id. Defaults to None.
142142
is_smooth (bool, optional): smoothing or not. Defaults to True.
143143
keep_largest_connected_component (bool, optional): Defaults to True.
@@ -211,7 +211,7 @@ def convert(
211211
vert = itk_image.TransformContinuousIndexToPhysicalPoint(vert)
212212
verts[_j, :] = np.array(vert)
213213

214-
# Write out the stl file, and then load into trimesh
214+
# Write out the STL file, and then load into trimesh
215215
try:
216216
temp_folder = tempfile.mkdtemp()
217217
raw_stl_filename = os.path.join(temp_folder, "temp.stl")
@@ -297,7 +297,7 @@ def image_array(self):
297297

298298
@property
299299
def itk_image(self):
300-
"""ITK image oject created from the encapsulated image object, or None"""
300+
"""ITK image object created from the encapsulated image object, or None"""
301301
return self._props.get("itk_image", None)
302302

303303
@property
@@ -307,12 +307,12 @@ def shape(self):
307307

308308
@property
309309
def spacing(self):
310-
"""Pixel spacing of orignal image, aka resolution, or None"""
310+
"""Pixel spacing of original image, aka resolution, or None"""
311311
return self._props.get("spacing", None)
312312

313313
@property
314314
def original_affine(self):
315-
"""Oringal affine of the image, or None"""
315+
"""Original affine of the image, or None"""
316316
return self._props.get("original_affine", None)
317317

318318
@property
@@ -377,7 +377,7 @@ def _load_data(self, image):
377377
return img_array, affine, original_affine, shape, spacing, itk_image
378378

379379
def _read_from_in_mem_image(self, image):
380-
"""Parse the in-memory image for the attibutes.
380+
"""Parse the in-memory image for the attributes.
381381
382382
Args:
383383
image (Image): App SDK Image instance.
@@ -399,7 +399,7 @@ def _read_from_in_mem_image(self, image):
399399
if num_dims == 5:
400400
img_array = np.squeeze(img_array)
401401
if len(img_array.shape) != 4:
402-
raise ValueError("Cannot squeeze 5D image to 4D; oject doesn't support time based data.")
402+
raise ValueError("Cannot squeeze 5D image to 4D; object doesn't support time based data.")
403403

404404
if self.is_channels_first:
405405
self._logger.info("4D image, channel first")

notebooks/tutorials/03_segmentation_app.ipynb

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,13 @@
109109
"!python -c \"import monai.deploy\" || pip install --upgrade -q \"monai-deploy-app-sdk\""
110110
]
111111
},
112+
{
113+
"cell_type": "markdown",
114+
"metadata": {},
115+
"source": [
116+
"Note: you may need to restart the Jupyter kernel to use the updated packages."
117+
]
118+
},
112119
{
113120
"cell_type": "markdown",
114121
"metadata": {},

notebooks/tutorials/03_segmentation_viz_app.ipynb

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,13 @@
112112
"!python -c \"import clara.viz\" || pip install --upgrade -q \"clara-viz\""
113113
]
114114
},
115+
{
116+
"cell_type": "markdown",
117+
"metadata": {},
118+
"source": [
119+
"Note: you may need to restart the Jupyter kernel to use the updated packages."
120+
]
121+
},
115122
{
116123
"cell_type": "markdown",
117124
"metadata": {},

notebooks/tutorials/05_full_tutorial.ipynb

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,13 @@
101101
"!python -c \"import monai.deploy\" || pip install --upgrade -q \"monai-deploy-app-sdk\""
102102
]
103103
},
104+
{
105+
"cell_type": "markdown",
106+
"metadata": {},
107+
"source": [
108+
"Note: you may need to restart the Jupyter kernel to use the updated packages."
109+
]
110+
},
104111
{
105112
"cell_type": "markdown",
106113
"metadata": {},
@@ -2106,7 +2113,7 @@
21062113
"name": "python",
21072114
"nbconvert_exporter": "python",
21082115
"pygments_lexer": "ipython3",
2109-
"version": "3.8.10"
2116+
"version": "3.7.5"
21102117
}
21112118
},
21122119
"nbformat": 4,

0 commit comments

Comments
 (0)