Commit dbbc231

pylint & update model api version in docstring

1 parent c6df647 commit dbbc231

8 files changed: +41, -44 lines

src/otx/algorithms/visual_prompting/adapters/openvino/__init__.py (+1 -1)

@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions
 # and limitations under the License.
 
-from .model_wrappers import *
+from .model_wrappers import *  # noqa: F403
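
A side note on the `# noqa: F403` added above: flake8 raises F403 for `from module import *` because the wildcard hides which names the package re-exports, and the suppression keeps the existing re-export behavior. A purely illustrative alternative (not what this commit does) would be an explicit re-export, using the wrapper names visible in the next file's diff:

    # Hypothetical explicit re-export; names taken from model_wrappers/__init__.py below.
    from .model_wrappers import Decoder, ImageEncoder, VisualPromptingOpenvinoAdapter

    __all__ = ["Decoder", "ImageEncoder", "VisualPromptingOpenvinoAdapter"]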

src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/__init__.py (+1 -1)

@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions
 # and limitations under the License.
 
-from .openvino_models import ImageEncoder, Decoder  # noqa: F401
 from .openvino_adapters import VisualPromptingOpenvinoAdapter  # noqa: F401
+from .openvino_models import Decoder, ImageEncoder  # noqa: F401

src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_adapters.py (+10 -20)

@@ -25,8 +25,7 @@
 
 
 def resize_image_with_aspect_pad(input: Output, size, keep_aspect_ratio, interpolation, pad_value):
-    """https://github.com/openvinotoolkit/model_api/blob/master/model_api/python/openvino/model_api/adapters/utils.py#L273-L341
-    """
+    """https://github.com/openvinotoolkit/model_api/blob/0.1.3/model_api/python/openvino/model_api/adapters/utils.py#L273-L341."""
     h_axis = 1
     w_axis = 2
     w, h = size
@@ -46,12 +45,8 @@ def resize_image_with_aspect_pad(input: Output, size, keep_aspect_ratio, interpo
     w_ratio = opset.divide(np.float32(w), iw)
     h_ratio = opset.divide(np.float32(h), ih)
     scale = opset.minimum(w_ratio, h_ratio)
-    nw = opset.convert(
-        opset.round(opset.multiply(iw, scale), "half_to_even"), destination_type="i32"
-    )
-    nh = opset.convert(
-        opset.round(opset.multiply(ih, scale), "half_to_even"), destination_type="i32"
-    )
+    nw = opset.convert(opset.round(opset.multiply(iw, scale), "half_to_even"), destination_type="i32")
+    nh = opset.convert(opset.round(opset.multiply(ih, scale), "half_to_even"), destination_type="i32")
     new_size = opset.concat([opset.unsqueeze(nh, 0), opset.unsqueeze(nw, 0)], axis=0)
     image = opset.interpolate(
         input,
@@ -84,8 +79,7 @@ def resize_image_with_aspect_pad(input: Output, size, keep_aspect_ratio, interpo
 
 
 def resize_image_with_aspect(size, interpolation, pad_value):
-    """https://github.com/openvinotoolkit/model_api/blob/master/model_api/python/openvino/model_api/adapters/utils.py#L356-L365
-    """
+    """https://github.com/openvinotoolkit/model_api/blob/0.1.3/model_api/python/openvino/model_api/adapters/utils.py#L356-L365."""
     return custom_preprocess_function(
         partial(
             resize_image_with_aspect_pad,
@@ -99,10 +93,11 @@ def resize_image_with_aspect(size, interpolation, pad_value):
 
 class VisualPromptingOpenvinoAdapter(OpenvinoAdapter):
     """Openvino Adapter Wrappers of OTX Visual Prompting.
-
+
     This class is to use fixed `fit_to_window` resize module.
     When model API version in otx is upgraded, it can be removed.
     """
+
     def embed_preprocessing(
         self,
         layout,
@@ -116,17 +111,14 @@ def embed_preprocessing(
         scale=None,
         input_idx=0,
     ):
-        """https://github.com/openvinotoolkit/model_api/blob/master/model_api/python/openvino/model_api/adapters/openvino_adapter.py#L340-L411
-        """
-        ppp = PrePostProcessor(self.model)
+        """https://github.com/openvinotoolkit/model_api/blob/0.1.3/model_api/python/openvino/model_api/adapters/openvino_adapter.py#L340-L411."""
+        ppp = PrePostProcessor(self.model)  # type: ignore[has-type]
 
         # Change the input type to the 8-bit image
         if dtype == type(int):
             ppp.input(input_idx).tensor().set_element_type(Type.u8)
 
-        ppp.input(input_idx).tensor().set_layout(ov.Layout("NHWC")).set_color_format(
-            ColorFormat.BGR
-        )
+        ppp.input(input_idx).tensor().set_layout(ov.Layout("NHWC")).set_color_format(ColorFormat.BGR)
 
         INTERPOLATION_MODE_MAP = {
             "LINEAR": "linear",
@@ -152,9 +144,7 @@ def embed_preprocessing(
         )
 
         else:
-            raise ValueError(
-                f"Upsupported resize type in model preprocessing: {resize_mode}"
-            )
+            raise ValueError(f"Upsupported resize type in model preprocessing: {resize_mode}")
 
         # Handle layout
         ppp.input(input_idx).model().set_layout(ov.Layout(layout))
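
For intuition about the `fit_to_window` resize this adapter pins in place: the opset graph above scales the input so it fits the target window while keeping aspect ratio (scale = min(w_ratio, h_ratio)), rounds the scaled size half-to-even, interpolates, and then (in the elided part of the referenced utils.py) pads with pad_value up to the full window. Below is a minimal eager-mode NumPy/OpenCV sketch of that arithmetic; the helper name, the defaults, and the top-left pad anchoring are assumptions for illustration, not the adapter's actual graph:

    import cv2
    import numpy as np

    def fit_to_window_sketch(image: np.ndarray, size=(1024, 1024), pad_value=0) -> np.ndarray:
        """Eager re-enactment of the resize-with-aspect-pad graph above (illustrative)."""
        w, h = size
        ih, iw = image.shape[:2]
        scale = min(w / iw, h / ih)    # opset.minimum(w_ratio, h_ratio)
        nw = int(np.round(iw * scale)) # np.round also rounds halves to even,
        nh = int(np.round(ih * scale)) # matching "half_to_even" in the graph
        resized = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_LINEAR)
        padded = np.full((h, w) + image.shape[2:], pad_value, dtype=image.dtype)
        padded[:nh, :nw] = resized     # pad bottom/right (assumed anchoring)
        return padded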

src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py (+15 -13)

@@ -20,7 +20,7 @@
 import cv2
 import numpy as np
 from openvino.model_api.adapters.inference_adapter import InferenceAdapter
-from openvino.model_api.models import DetectionModel, ImageModel
+from openvino.model_api.models import ImageModel
 from openvino.model_api.models.types import (
     BooleanValue,
     ListValue,
@@ -96,18 +96,20 @@ def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]):
             # TODO (sungchul): add condition to check whether using bbox or point
             point_coords = self._apply_coords(bbox.reshape(-1, 2, 2), inputs["original_size"])
             point_labels = np.array([2, 3], dtype=np.float32).reshape((-1, 2))
-            processed_prompts.append({
-                "point_coords": point_coords,
-                "point_labels": point_labels,
-                # TODO (sungchul): how to generate mask_input and has_mask_input
-                "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
-                "has_mask_input": np.zeros((1, 1), dtype=np.float32),
-                "orig_size": np.array(inputs["original_size"], dtype=np.float32).reshape((-1, 2)),
-                "label": label
-            })
+            processed_prompts.append(
+                {
+                    "point_coords": point_coords,
+                    "point_labels": point_labels,
+                    # TODO (sungchul): how to generate mask_input and has_mask_input
+                    "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
+                    "has_mask_input": np.zeros((1, 1), dtype=np.float32),
+                    "orig_size": np.array(inputs["original_size"], dtype=np.float32).reshape((-1, 2)),
+                    "label": label,
+                }
+            )
         return processed_prompts
 
-    def _apply_coords(self, coords: np.ndarray, original_size: Union[List[int], Tuple[int]]) -> np.ndarray:
+    def _apply_coords(self, coords: np.ndarray, original_size: Union[List[int], Tuple[int, int]]) -> np.ndarray:
         """Process coords according to preprocessed image size using image meta."""
         old_h, old_w = original_size
         new_h, new_w = self._get_preprocess_shape(original_size[0], original_size[1], self.image_size)
@@ -123,7 +125,7 @@ def _get_preprocess_shape(self, old_h: int, old_w: int, image_size: int) -> Tupl
         new_w = int(new_w + 0.5)
         new_h = int(new_h + 0.5)
         return (new_h, new_w)
-
+
     def _check_io_number(self, number_of_inputs, number_of_outputs):
         pass
 
@@ -146,7 +148,7 @@ def postprocess(self, outputs: Dict[str, np.ndarray], meta: Dict[str, Any]) -> T
         """
 
         def sigmoid(x):
-            return np.tanh(x * 0.5) * 0.5 + 0.5 # to avoid overflow
+            return np.tanh(x * 0.5) * 0.5 + 0.5  # to avoid overflow
 
         soft_prediction = outputs[self.output_blob_name].squeeze()
         soft_prediction = self.resize_and_crop(soft_prediction, meta["original_size"][0])
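
An aside on the `sigmoid` helper touched above: `np.tanh(x * 0.5) * 0.5 + 0.5` is the standard numerically stable form of the logistic function, via the identity 1 / (1 + exp(-x)) = 0.5 * (1 + tanh(x / 2)). The naive form overflows in exp(-x) for large negative x, while tanh merely saturates at -1. A quick self-contained check (input values are illustrative):

    import numpy as np

    x = np.array([-1000.0, -10.0, 0.0, 10.0, 1000.0], dtype=np.float32)
    with np.errstate(over="ignore"):       # naive form overflows exp() at x = -1000
        naive = 1.0 / (1.0 + np.exp(-x))
    stable = np.tanh(x * 0.5) * 0.5 + 0.5  # same values, no overflow anywhere
    assert np.allclose(naive, stable)      # the overflow saturates to 0.0, so both agree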

src/otx/algorithms/visual_prompting/configs/base/configuration.py (+4 -3)

@@ -19,11 +19,12 @@
 
 from otx.algorithms.common.configs import BaseConfig
 from otx.api.configuration.elements import (
+    ParameterGroup,
     add_parameter_group,
+    configurable_boolean,
     configurable_float,
     configurable_integer,
     string_attribute,
-    configurable_boolean
 )
 from otx.api.configuration.model_lifecycle import ModelLifecycle
 
@@ -41,7 +42,7 @@ class __LearningParameters(BaseConfig.BaseLearningParameters):
         description = header
 
     @attrs
-    class __Postprocessing:
+    class __Postprocessing(ParameterGroup):
         header = string_attribute("Postprocessing")
         description = header
 
@@ -86,7 +87,7 @@ class __Postprocessing:
             default_value=64,
             affects_outcome_of=ModelLifecycle.INFERENCE,
         )
-
+
         orig_height = configurable_integer(
             header="Original height",
             description="Model input height before embedding processing.",

src/otx/algorithms/visual_prompting/tasks/openvino.py (+2 -1)

@@ -24,11 +24,12 @@
 
 import attr
 import numpy as np
-from openvino.model_api.adapters import OpenvinoAdapter, create_core
+from openvino.model_api.adapters import create_core
 from openvino.model_api.models import Model
 
 from otx.algorithms.common.utils.logger import get_logger
 from otx.algorithms.common.utils.utils import get_default_async_reqs_num
+from otx.algorithms.visual_prompting.adapters.openvino import model_wrappers
 from otx.algorithms.visual_prompting.adapters.openvino.model_wrappers import (
     VisualPromptingOpenvinoAdapter,
 )

tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py (+5 -4)

@@ -101,19 +101,20 @@ def test_apply_coords(self):
         results = self.decoder._apply_coords(coords, original_size)
 
         assert results.shape == (1, 2, 2)
-        assert np.all(results == np.array([[[0.5, 0.5], [1., 1.]]]))
+        assert np.all(results == np.array([[[0.5, 0.5], [1.0, 1.0]]]))
 
     @e2e_pytest_unit
-    @pytest.mark.parametrize("old_h,old_w,image_size,expected",
+    @pytest.mark.parametrize(
+        "old_h,old_w,image_size,expected",
         [
             (4, 3, 6, (6, 5)),
             (3, 4, 6, (5, 6)),
-        ]
+        ],
     )
     def test_get_preprocess_shape(self, old_h: int, old_w: int, image_size: int, expected: Tuple[int]):
         """Test _get_preprocess_shape."""
         result = self.decoder._get_preprocess_shape(old_h, old_w, image_size)
-
+
         assert result == expected
 
     @e2e_pytest_unit
tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py (+3 -1)

@@ -286,7 +286,9 @@ def test_load_checkpoint_from_local_checkpoint(self, mocker, monkeypatch, checkp
         )
         mocker.patch("builtins.open").__enter__.return_value = True
         mocker.patch("torch.load", return_value=OrderedDict())
-        mocker_load_from_checkpoint = mocker.patch("otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_from_checkpoint")
+        mocker_load_from_checkpoint = mocker.patch(
+            "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_from_checkpoint"
+        )
         mocker_load_state_dict = mocker.patch(
             "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_state_dict"
         )
