Fix yolox export perf degradation #3534

Merged
13 changes: 13 additions & 0 deletions src/otx/algo/detection/backbones/csp_darknet.py
@@ -78,6 +78,19 @@ def forward(self, x: Tensor) -> Tensor:
)
return self.conv(x)

def export(self, x: Tensor) -> Tensor:
"""Forward for export."""
# shape of x: (b, c, h, w) -> y: (b, 4c, h/2, w/2)
b, c, h, w = x.shape
x = x.reshape(b, c, -1, 2, w)
x = x.reshape(b, c, x.shape[2], 2, -1, 2)
half_h = x.shape[2]
half_w = x.shape[4]
x = x.permute(0, 5, 3, 1, 2, 4)
x = x.reshape(b, c * 4, half_h, half_w)

return self.conv(x)


class SPPBottleneck(BaseModule):
"""Spatial pyramid pooling layer used in YOLOv3-SPP.
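The new `Focus.export` above rearranges the input with reshape/permute instead of the strided slicing used by the training-time forward path, which presumably is what regressed throughput in the exported model. A standalone sketch (not part of the PR) that checks the two rearrangements are identical, assuming `Focus.forward` concatenates the four pixel groups in the usual (top-left, bottom-left, top-right, bottom-right) order:

```python
import torch


def focus_slice(x: torch.Tensor) -> torch.Tensor:
    # Strided-slice space-to-depth, as in the usual Focus.forward.
    top_left = x[..., ::2, ::2]
    top_right = x[..., ::2, 1::2]
    bot_left = x[..., 1::2, ::2]
    bot_right = x[..., 1::2, 1::2]
    return torch.cat((top_left, bot_left, top_right, bot_right), dim=1)


def focus_reshape(x: torch.Tensor) -> torch.Tensor:
    # Reshape/permute rearrangement, as in the new export path.
    b, c, h, w = x.shape
    x = x.reshape(b, c, h // 2, 2, w)
    x = x.reshape(b, c, h // 2, 2, w // 2, 2)
    x = x.permute(0, 5, 3, 1, 2, 4)
    return x.reshape(b, c * 4, h // 2, w // 2)


x = torch.rand(1, 3, 128, 128)
assert torch.equal(focus_slice(x), focus_reshape(x))
```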
32 changes: 32 additions & 0 deletions src/otx/algo/detection/yolox.py
@@ -23,8 +23,12 @@
from otx.core.exporter.base import OTXModelExporter
from otx.core.exporter.native import OTXNativeModelExporter
from otx.core.model.detection import ExplainableOTXDetModel
from otx.core.types.export import OTXExportFormatType
from otx.core.types.precision import OTXPrecisionType

if TYPE_CHECKING:
from pathlib import Path

from torch import Tensor, nn


@@ -198,6 +202,34 @@ def _exporter(self) -> OTXModelExporter:
output_names=["bboxes", "labels", "feature_vector", "saliency_map"] if self.explain_mode else None,
)

def export(
self,
output_dir: Path,
base_name: str,
export_format: OTXExportFormatType,
precision: OTXPrecisionType = OTXPrecisionType.FP32,
) -> Path:
"""Export this model to the specified output directory.

This override temporarily patches otx.algo.detection.backbones.csp_darknet.Focus.forward with Focus.export so that the export-friendly implementation is used during export.

Args:
output_dir (Path): directory for saving the exported model
base_name (str): base name for the exported model file. The extension is defined by the target export format
export_format (OTXExportFormatType): format of the output model
precision (OTXPrecisionType): precision of the output model

Returns:
Path: path to the exported model.
"""
# patch otx.algo.detection.backbones.csp_darknet.Focus.forward
orig_focus_forward = self.model.backbone.stem.forward
try:
self.model.backbone.stem.forward = self.model.backbone.stem.export
return super().export(output_dir, base_name, export_format, precision)
finally:
self.model.backbone.stem.forward = orig_focus_forward

def forward_for_tracing(self, inputs: Tensor) -> list[InstanceData]:
"""Forward function for export."""
shape = (int(inputs.shape[2]), int(inputs.shape[3]))
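The `export()` override above swaps the stem's `forward` for `export` only for the duration of the export and restores it in a `finally` block. A generic sketch of the same patch-and-restore idiom written as a context manager (the names here are illustrative, not part of the PR):

```python
from contextlib import contextmanager


@contextmanager
def patched_attr(obj, name, value):
    """Temporarily replace obj.<name> with value, restoring it on exit."""
    original = getattr(obj, name)
    setattr(obj, name, value)
    try:
        yield obj
    finally:
        setattr(obj, name, original)


# Hypothetical usage, analogous to the override:
# with patched_attr(model.backbone.stem, "forward", model.backbone.stem.export):
#     exporter.export(...)
```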
12 changes: 7 additions & 5 deletions src/otx/engine/utils/auto_configurator.py
@@ -378,18 +378,20 @@ def update_ov_subset_pipeline(self, datamodule: OTXDataModule, subset: str = "te
OTXDataModule: The modified OTXDataModule object with OpenVINO subset transforms applied.
"""
data_configuration = datamodule.config
- ov_test_config = self._load_default_config(model_name="openvino_model")["data"]["config"][f"{subset}_subset"]
+ ov_config = self._load_default_config(model_name="openvino_model")["data"]["config"]
subset_config = getattr(data_configuration, f"{subset}_subset")
- subset_config.batch_size = ov_test_config["batch_size"]
- subset_config.transform_lib_type = ov_test_config["transform_lib_type"]
- subset_config.transforms = ov_test_config["transforms"]
- subset_config.to_tv_image = ov_test_config["to_tv_image"]
+ subset_config.batch_size = ov_config[f"{subset}_subset"]["batch_size"]
+ subset_config.transform_lib_type = ov_config[f"{subset}_subset"]["transform_lib_type"]
+ subset_config.transforms = ov_config[f"{subset}_subset"]["transforms"]
+ subset_config.to_tv_image = ov_config[f"{subset}_subset"]["to_tv_image"]
+ data_configuration.image_color_channel = ov_config["image_color_channel"]
data_configuration.tile_config.enable_tiler = False
msg = (
f"For OpenVINO IR models, Update the following {subset} \n"
f"\t transforms: {subset_config.transforms} \n"
f"\t transform_lib_type: {subset_config.transform_lib_type} \n"
f"\t batch_size: {subset_config.batch_size} \n"
+ f"\t image_color_channel: {data_configuration.image_color_channel} \n"
"And the tiler is disabled."
)
warn(msg, stacklevel=1)
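For reference, a hypothetical sketch of the nested layout the lookups above assume for the default "openvino_model" config; the keys mirror the code, the values are placeholders only:

```python
default_config = {
    "data": {
        "config": {
            "image_color_channel": "RGB",  # placeholder value
            "test_subset": {
                "batch_size": 1,
                "transform_lib_type": "TORCHVISION",
                "transforms": [],
                "to_tv_image": False,
            },
        },
    },
}

subset = "test"
ov_config = default_config["data"]["config"]
assert "batch_size" in ov_config[f"{subset}_subset"]
assert "image_color_channel" in ov_config
```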
15 changes: 14 additions & 1 deletion tests/unit/algo/detection/backbones/test_csp_darknet.py
@@ -8,7 +8,7 @@

import pytest
import torch
- from otx.algo.detection.backbones.csp_darknet import CSPDarknet
+ from otx.algo.detection.backbones.csp_darknet import CSPDarknet, Focus
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

@@ -31,6 +31,19 @@ def is_norm(modules):
return False


class TestFocus:
def test_export(self) -> None:
focus_model = Focus(3, 32)
focus_model.requires_grad_(False)
focus_model.cpu().eval()

x = torch.rand(1, 3, 128, 128)

results = focus_model.export(x)

assert results.shape == (1, 32, 64, 64)


class TestCSPDarknet:
def test_init_with_large_frozen_stages(self) -> None:
"""Test __init__ with large frozen_stages."""
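Since the fix hinges on `Focus.export` matching `Focus.forward` numerically, a companion test along these lines could pin that down (a sketch, not part of the PR):

```python
import torch
from otx.algo.detection.backbones.csp_darknet import Focus


def test_export_matches_forward() -> None:
    """Focus.export should produce the same output as Focus.forward."""
    focus_model = Focus(3, 32)
    focus_model.requires_grad_(False)
    focus_model.cpu().eval()

    x = torch.rand(1, 3, 128, 128)

    with torch.no_grad():
        assert torch.allclose(focus_model.forward(x), focus_model.export(x))
```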