diff --git a/monai/deploy/operators/dicom_seg_writer_operator.py b/monai/deploy/operators/dicom_seg_writer_operator.py index ca885613..79a72098 100644 --- a/monai/deploy/operators/dicom_seg_writer_operator.py +++ b/monai/deploy/operators/dicom_seg_writer_operator.py @@ -178,7 +178,7 @@ def _read_from_dcm(self, file_path: str): return dcmread(file_path) def select_input_file(self, input_folder, extensions=SUPPORTED_EXTENSIONS): - """Select the inut files based on supported extensions. + """Select the input files based on supported extensions. Args: input_folder (string): the path of the folder containing the input file(s) @@ -222,7 +222,7 @@ def _image_file_to_numpy(self, input_path: str): def _get_label_list(self, stringfied_list_of_labels: str = ""): """Parse the string to get the label list. - If empty string is provided, a list of a single element is retured. + If empty string is provided, a list of a single element is returned. Args: stringfied_list_of_labels (str): string representing the list of segmentation labels. @@ -299,7 +299,7 @@ def safe_get(ds, key): """Safely gets the tag value if present from the Dataset and logs failure. The safe get method of dict works for str, but not the hex key. The added - benefit of this funtion is that it logs the failure to get the keyed value. + benefit of this function is that it logs the failure to get the keyed value. Args: ds (Dataset): pydicom Dataset @@ -464,7 +464,7 @@ def create_label_segment(label, name): segments = Sequence() # Assumes the label starts at 1 and increment sequentially. - # TODO: This part needs to be more deteministic, e.g. with a dict. + # TODO: This part needs to be more deterministic, e.g. with a dict. 
for lb, name in enumerate(seg_labels, 1): segment = create_label_segment(lb, name) segments.append(segment) diff --git a/monai/deploy/operators/dicom_series_selector_operator.py b/monai/deploy/operators/dicom_series_selector_operator.py index 2f59b988..54efa671 100644 --- a/monai/deploy/operators/dicom_series_selector_operator.py +++ b/monai/deploy/operators/dicom_series_selector_operator.py @@ -24,7 +24,7 @@ @md.input("dicom_study_list", List[DICOMStudy], IOType.IN_MEMORY) -@md.input("selection_rules", Dict, IOType.IN_MEMORY) # This overides the rules in the instance. +@md.input("selection_rules", Dict, IOType.IN_MEMORY) # This overrides the rules in the instance. @md.output("study_selected_series_list", List[StudySelectedSeries], IOType.IN_MEMORY) class DICOMSeriesSelectorOperator(Operator): """This operator selects a list of DICOM Series in a DICOM Study for a given set of selection rules. diff --git a/monai/deploy/operators/dicom_text_sr_writer_operator.py b/monai/deploy/operators/dicom_text_sr_writer_operator.py index a80204d1..412d4e30 100644 --- a/monai/deploy/operators/dicom_text_sr_writer_operator.py +++ b/monai/deploy/operators/dicom_text_sr_writer_operator.py @@ -103,7 +103,7 @@ def __init__( *args, **kwargs, ): - """Class to write DICOM SR SOP Instance for AI textual result in memeory or in a file. + """Class to write DICOM SR SOP Instance for AI textual result in memory or in a file. Args: copy_tags (bool): True for copying DICOM attributes from a provided DICOMSeries. diff --git a/monai/deploy/operators/inference_operator.py b/monai/deploy/operators/inference_operator.py index a41b5f96..5a07b02a 100644 --- a/monai/deploy/operators/inference_operator.py +++ b/monai/deploy/operators/inference_operator.py @@ -51,7 +51,7 @@ def compute(self, op_input: InputContext, op_output: OutputContext, context: Exe @abstractmethod def predict(self, data: Any) -> Union[Image, Any]: - """Prdicts results using the models(s) with input tensors. 
+ """Predicts results using the models(s) with input tensors. This method must be overridden by a derived class. diff --git a/monai/deploy/operators/monai_seg_inference_operator.py b/monai/deploy/operators/monai_seg_inference_operator.py index fd5d4e94..b606610c 100644 --- a/monai/deploy/operators/monai_seg_inference_operator.py +++ b/monai/deploy/operators/monai_seg_inference_operator.py @@ -1,4 +1,4 @@ -# Copyright 2021 MONAI Consortium +# Copyright 2021-2002 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -70,8 +70,8 @@ def __init__( Args: roi_size (Union[Sequence[int], int]): The tensor size used in inference. - pre_transforms (Compose): MONAI Compose oject used for pre-transforms. - post_transforms (Compose): MONAI Compose oject used for post-transforms. + pre_transforms (Compose): MONAI Compose object used for pre-transforms. + post_transforms (Compose): MONAI Compose object used for post-transforms. overlap (float): The overlap used in sliding window inference. """ @@ -132,7 +132,7 @@ def overlap(self, val: float): def _convert_dicom_metadata_datatype(self, metadata: Dict): """Converts metadata in pydicom types to the corresponding native types. - It is knwon that some values of the metadata are of the pydicom types, for images converted + It is known that some values of the metadata are of the pydicom types, for images converted from DICOM series. 
Need to use this function to convert the types with best effort and for the few knowns metadata attributes, until the following issue is addressed: https://github.com/Project-MONAI/monai-deploy-app-sdk/issues/185 @@ -264,7 +264,7 @@ def post_process(self, pre_transforms: Compose, out_dir: str = "./infer_out") -> raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") def predict(self, data: Any, *args, **kwargs) -> Union[Image, Any]: - """Prdicts results using the models(s) with input tensors. + """Predicts results using the model(s) with input tensors. This method must be overridden by a derived class. @@ -310,7 +310,7 @@ def get_data(self, input_image): A single image is loaded with a single set of metadata as of now. The App SDK Image asnumpy() function is expected to return a numpy array of index order `DHW`. - This is because in the DICOM serie to volume operator pydicom Dataset pixel_array is used to + This is because in the DICOM series to volume operator pydicom Dataset pixel_array is used to to get per instance pixel numpy array, with index order of `HW`. When all instances are stacked, along the first axis, the Image numpy array's index order is `DHW`. ITK array_view_from_image and SimpleITK GetArrayViewFromImage also returns a numpy array with the index order of `DHW`. @@ -332,7 +332,7 @@ def get_data(self, input_image): if not isinstance(i, Image): raise TypeError("Only object of Image type is supported.") - # The Image asnumpy() retruns NumPy array similar to ITK array_view_from_image + # The Image asnumpy() returns NumPy array similar to ITK array_view_from_image # The array then needs to be transposed, as does in MONAI ITKReader, to align # with the output from Nibabel reader loading NIfTI files. 
data = i.asnumpy().T diff --git a/monai/deploy/operators/png_converter_operator.py b/monai/deploy/operators/png_converter_operator.py index 0f01b1ac..89c9d35e 100644 --- a/monai/deploy/operators/png_converter_operator.py +++ b/monai/deploy/operators/png_converter_operator.py @@ -27,7 +27,7 @@ @md.env(pip_packages=["Pillow >= 8.0.0"]) class PNGConverterOperator(Operator): """ - This operator writes out a 3D Volumtric Image to disk in a slice by slice manner + This operator writes out a 3D Volumetric Image to disk in a slice by slice manner """ def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext): diff --git a/monai/deploy/operators/stl_conversion_operator.py b/monai/deploy/operators/stl_conversion_operator.py index 6b7fdd6d..cfb62a90 100644 --- a/monai/deploy/operators/stl_conversion_operator.py +++ b/monai/deploy/operators/stl_conversion_operator.py @@ -137,7 +137,7 @@ def convert( """ Args: image (Image): object with the image (ndarray of DHW index order) and its metadata dictionary. - output_file (str): output stl file path. Default to None for not saving output file. + output_file (str): output STL file path. Default to None for not saving output file. class_id (array, optional): Class label id. Defaults to None. is_smooth (bool, optional): smoothing or not. Defaults to True. keep_largest_connected_component (bool, optional): Defaults to True. 
@@ -211,7 +211,7 @@ def convert( vert = itk_image.TransformContinuousIndexToPhysicalPoint(vert) verts[_j, :] = np.array(vert) - # Write out the stl file, and then load into trimesh + # Write out the STL file, and then load into trimesh try: temp_folder = tempfile.mkdtemp() raw_stl_filename = os.path.join(temp_folder, "temp.stl") @@ -297,7 +297,7 @@ def image_array(self): @property def itk_image(self): - """ITK image oject created from the encapsulated image object, or None""" + """ITK image object created from the encapsulated image object, or None""" return self._props.get("itk_image", None) @property @@ -307,12 +307,12 @@ def shape(self): @property def spacing(self): - """Pixel spacing of orignal image, aka resolution, or None""" + """Pixel spacing of original image, aka resolution, or None""" return self._props.get("spacing", None) @property def original_affine(self): - """Oringal affine of the image, or None""" + """Original affine of the image, or None""" return self._props.get("original_affine", None) @property @@ -377,7 +377,7 @@ def _load_data(self, image): return img_array, affine, original_affine, shape, spacing, itk_image def _read_from_in_mem_image(self, image): - """Parse the in-memory image for the attibutes. + """Parse the in-memory image for the attributes. Args: image (Image): App SDK Image instance. 
@@ -399,7 +399,7 @@ def _read_from_in_mem_image(self, image): if num_dims == 5: img_array = np.squeeze(img_array) if len(img_array.shape) != 4: - raise ValueError("Cannot squeeze 5D image to 4D; oject doesn't support time based data.") + raise ValueError("Cannot squeeze 5D image to 4D; object doesn't support time based data.") if self.is_channels_first: self._logger.info("4D image, channel first") diff --git a/notebooks/tutorials/03_segmentation_app.ipynb b/notebooks/tutorials/03_segmentation_app.ipynb index cfeb7c72..8e37d993 100644 --- a/notebooks/tutorials/03_segmentation_app.ipynb +++ b/notebooks/tutorials/03_segmentation_app.ipynb @@ -109,6 +109,13 @@ "!python -c \"import monai.deploy\" || pip install --upgrade -q \"monai-deploy-app-sdk\"" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: you may need to restart the Jupyter kernel to use the updated packages." + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/notebooks/tutorials/03_segmentation_viz_app.ipynb b/notebooks/tutorials/03_segmentation_viz_app.ipynb index 5da22d83..d4c56a99 100644 --- a/notebooks/tutorials/03_segmentation_viz_app.ipynb +++ b/notebooks/tutorials/03_segmentation_viz_app.ipynb @@ -112,6 +112,13 @@ "!python -c \"import clara.viz\" || pip install --upgrade -q \"clara-viz\"" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: you may need to restart the Jupyter kernel to use the updated packages." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/notebooks/tutorials/05_full_tutorial.ipynb b/notebooks/tutorials/05_full_tutorial.ipynb index 766345ac..b14d9658 100644 --- a/notebooks/tutorials/05_full_tutorial.ipynb +++ b/notebooks/tutorials/05_full_tutorial.ipynb @@ -101,6 +101,13 @@ "!python -c \"import monai.deploy\" || pip install --upgrade -q \"monai-deploy-app-sdk\"" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: you may need to restart the Jupyter kernel to use the updated packages." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -2106,7 +2113,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.5" } }, "nbformat": 4,