[Enhancement] upgrade isort in pre-commit config #141

Merged (3 commits) on Feb 9, 2022
2 changes: 0 additions & 2 deletions .isort.cfg

This file was deleted.

8 changes: 2 additions & 6 deletions .pre-commit-config.yaml
@@ -3,12 +3,8 @@ repos:
rev: 4.0.1
hooks:
- id: flake8
- repo: https://github.com/asottile/seed-isort-config
rev: v2.2.0
hooks:
- id: seed-isort-config
- repo: https://github.com/timothycrosley/isort
rev: 4.3.21
- repo: https://github.com/PyCQA/isort
rev: 5.10.1
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf
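Note on this hunk: isort 5 works out first-party versus third-party placement on its own, so the seed-isort-config hook can be dropped and the hook source moves to the maintained PyCQA/isort mirror. Below is a minimal sketch of the sorting the upgraded hook performs, assuming isort >= 5.10.1 is installed; the known_* overrides are passed inline only to keep the example self-contained (in the repo they live in setup.cfg).

import isort

# Unsorted imports mixing stdlib, third-party and first-party modules.
messy = (
    "from mmdeploy.utils import load_config\n"
    "import mmcv\n"
    "import os\n"
)

# isort 5 groups stdlib, third-party and first-party imports and sorts each
# group alphabetically; no seeded known_third_party list is required.
print(isort.code(messy,
                 known_first_party=["mmdeploy"],
                 known_third_party=["mmcv"]))
# Expected: os first, then mmcv, then the mmdeploy import,
# with one blank line between the three groups.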
4 changes: 2 additions & 2 deletions mmdeploy/apis/ncnn/__init__.py
@@ -4,6 +4,6 @@
__all__ = ['is_available', 'is_plugin_available']

if is_available():
from mmdeploy.backend.ncnn.onnx2ncnn import (onnx2ncnn,
get_output_model_file)
from mmdeploy.backend.ncnn.onnx2ncnn import (get_output_model_file,
onnx2ncnn)
__all__ += ['onnx2ncnn', 'get_output_model_file']
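The swap inside the parentheses is isort's default inline sorting: the names in a single from-import are alphabetized. A short sketch, assuming isort >= 5 is available; the exact wrapping of the result depends on line_length and multi_line_output.

import isort

src = ("from mmdeploy.backend.ncnn.onnx2ncnn import (onnx2ncnn, "
       "get_output_model_file)\n")

# The imported names come back in alphabetical order
# (get_output_model_file before onnx2ncnn).
print(isort.code(src, line_length=79))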
4 changes: 2 additions & 2 deletions mmdeploy/apis/openvino/__init__.py
@@ -4,8 +4,8 @@
__all__ = ['is_available']

if is_available():
from mmdeploy.backend.openvino.onnx2openvino \
import onnx2openvino, get_output_model_file
from mmdeploy.backend.openvino.onnx2openvino import (get_output_model_file,
onnx2openvino)
from .utils import get_input_info_from_cfg
__all__ += [
'onnx2openvino', 'get_output_model_file', 'get_input_info_from_cfg'
2 changes: 1 addition & 1 deletion mmdeploy/backend/openvino/__init__.py
@@ -12,6 +12,6 @@ def is_available() -> bool:


if is_available():
from .wrapper import OpenVINOWrapper
from .onnx2openvino import get_output_model_file
from .wrapper import OpenVINOWrapper
__all__ = ['OpenVINOWrapper', 'get_output_model_file']
1 change: 1 addition & 0 deletions mmdeploy/codebase/mmcls/deploy/classification.py
@@ -230,6 +230,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
Defaults to `None` and the results will only print on stdout.
"""
import warnings

from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file, log_level=logging.INFO)

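The only change here is the added blank line: isort 5 puts one blank line between the standard-library group (warnings) and the third-party group (mmcv). A minimal sketch, assuming isort >= 5; mmcv is pinned to the third-party section just to make the example deterministic.

import isort

src = "import warnings\nfrom mmcv.utils import get_logger\n"

# One blank line is inserted between the stdlib and third-party sections
# (lines_between_sections defaults to 1).
print(isort.code(src, known_third_party=["mmcv"]))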
2 changes: 1 addition & 1 deletion mmdeploy/codebase/mmdet/deploy/object_detection.py
@@ -109,8 +109,8 @@ def create_input(self,
Returns:
tuple: (data, img), meta information for the input image and input.
"""
from mmdet.datasets.pipelines import Compose
from mmcv.parallel import collate, scatter
from mmdet.datasets.pipelines import Compose
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
dynamic_flag = is_dynamic_shape(self.deploy_cfg)
1 change: 1 addition & 0 deletions mmdeploy/codebase/mmdet/deploy/object_detection_model.py
@@ -399,6 +399,7 @@ def __init__(self, backend: Backend, backend_files: Sequence[str],
super().__init__(backend, backend_files, device, class_names,
deploy_cfg, **kwargs)
from mmdet.models.builder import build_head, build_roi_extractor

from ..models.roi_heads.bbox_head import bbox_head__get_bboxes

self.bbox_roi_extractor = build_roi_extractor(
2 changes: 1 addition & 1 deletion mmdeploy/codebase/mmdet/models/dense_heads/yolox_head.py
@@ -139,8 +139,8 @@ def yolox_head__get_bboxes__ncnn(ctx,
output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
"""
from mmdeploy.codebase.mmdet.core.ops import ncnn_detection_output_forward
from mmdeploy.utils.config_utils import is_dynamic_shape
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import is_dynamic_shape
dynamic_flag = is_dynamic_shape(ctx.cfg)
if dynamic_flag:
logger = get_root_logger()
1 change: 1 addition & 0 deletions mmdeploy/codebase/mmedit/deploy/mmediting.py
@@ -55,6 +55,7 @@ def build_dataset(dataset_cfg: Union[str, mmcv.Config], *args,
Dataset: A PyTorch dataset.
"""
from mmedit.datasets import build_dataset as build_dataset_mmedit

from mmdeploy.utils import load_config
dataset_cfg = load_config(dataset_cfg)[0]
data = dataset_cfg.data
1 change: 1 addition & 0 deletions mmdeploy/codebase/mmocr/deploy/mmocr.py
@@ -139,6 +139,7 @@ def single_gpu_test(model: torch.nn.Module,
list: The prediction results.
"""
import mmocr

# fixed the bug when using `--show-dir` after mmocr v0.4.1
if version.parse(mmocr.__version__) < version.parse('0.4.1'):
from mmdet.apis import single_gpu_test
4 changes: 2 additions & 2 deletions mmdeploy/codebase/mmseg/deploy/segmentation.py
@@ -88,8 +88,8 @@ def init_pytorch_model(self,
nn.Module: An initialized torch model generated by OpenMMLab
codebases.
"""
from mmseg.apis import init_segmentor
from mmcv.cnn.utils import revert_sync_batchnorm
from mmseg.apis import init_segmentor
model = init_segmentor(self.model_cfg, model_checkpoint, self.device)
model = revert_sync_batchnorm(model)
return model.eval()
@@ -109,8 +109,8 @@ def create_input(self,
Returns:
tuple: (data, img), meta information for the input image and input.
"""
from mmseg.datasets.pipelines import Compose
from mmcv.parallel import collate, scatter
from mmseg.datasets.pipelines import Compose
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = process_model_config(self.model_cfg, imgs, input_shape)
4 changes: 2 additions & 2 deletions setup.cfg
@@ -12,8 +12,8 @@ split_before_expression_after_opening_paren = true
[isort]
line_length = 79
multi_line_output = 0
known_standard_library = setuptools
extra_standard_library = setuptools
known_first_party = mmdeploy
known_third_party = mmcv
known_third_party = h5py,m2r,mmcls,mmcv,mmdeploy_python,mmdet,mmedit,mmocr,mmseg,ncnn,numpy,onnx,onnxruntime,packaging,pyppeteer,pyppl,pytest,pytorch_sphinx_theme,recommonmark,setuptools,sphinx,tensorrt,torch,torchvision
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
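setup.cfg note: in isort 5, known_standard_library overrides the entire built-in stdlib list, so a module that should merely be added to it (setuptools here) moves to the new extra_standard_library option, and known_third_party becomes a plain hand-maintained list now that seed-isort-config is gone. A sketch of the effect, assuming isort >= 5:

import isort

default_cfg = isort.Config()
custom_cfg = isort.Config(extra_standard_library=["setuptools"])

# Without the override, setuptools is typically placed as third-party;
# with extra_standard_library it is treated as part of the stdlib section.
print(isort.place_module("setuptools", config=default_cfg))
print(isort.place_module("setuptools", config=custom_cfg))  # STDLIB
print(isort.place_module("os", config=custom_cfg))          # STDLIB either way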
2 changes: 1 addition & 1 deletion setup.py
@@ -38,9 +38,9 @@ def parse_requirements(fname='requirements.txt', with_version=True):
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
import re
require_fpath = fname

def parse_line(line):
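Moving import re above the from-import reflects isort's default ordering within a section: plain import statements come before from-imports unless force_sort_within_sections is enabled. A minimal check, assuming isort >= 5:

import isort

src = (
    "import sys\n"
    "from os.path import exists\n"
    "import re\n"
)

# Straight imports first (re, sys), then from-imports (os.path).
print(isort.code(src))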
7 changes: 3 additions & 4 deletions tests/test_backend/test_wrapper.py
@@ -46,8 +46,8 @@ def generate_onnx_file():

def onnx2backend(backend, onnx_file):
if backend == Backend.TENSORRT:
from mmdeploy.backend.tensorrt import create_trt_engine,\
save_trt_engine
from mmdeploy.backend.tensorrt import (create_trt_engine,
save_trt_engine)
backend_file = tempfile.NamedTemporaryFile(suffix='.engine').name
engine = create_trt_engine(
onnx_file, {
@@ -74,8 +74,7 @@ def onnx2backend(backend, onnx_file):
subprocess.call([onnx2ncnn_path, onnx_file, param_file, bin_file])
return param_file, bin_file
elif backend == Backend.OPENVINO:
from mmdeploy.apis.openvino import onnx2openvino,\
get_output_model_file
from mmdeploy.apis.openvino import get_output_model_file, onnx2openvino
backend_dir = tempfile.TemporaryDirectory().name
backend_file = get_output_model_file(onnx_file, backend_dir)
input_info = {'input': test_img.shape}
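Both edits in this file replace backslash-continued imports with isort 5's parenthesized wrapping, with the names alphabetized. A rough illustration, assuming isort >= 5; line_length=60 is used only to force the wrap in this unindented sketch, whereas in the real file the 79-column limit is exceeded because the imports sit inside a test helper.

import isort

src = (
    "from mmdeploy.backend.tensorrt import create_trt_engine,\\\n"
    "    save_trt_engine\n"
)

# The continuation is rewritten into the configured wrap style and the
# imported names are sorted; exact wrapping varies with multi_line_output.
print(isort.code(src, line_length=60))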
2 changes: 1 addition & 1 deletion tests/test_codebase/test_mmcls/test_classification.py
@@ -95,7 +95,7 @@ def test_get_partition_cfg():


def test_build_dataset_and_dataloader():
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import DataLoader, Dataset
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'
8 changes: 4 additions & 4 deletions tests/test_codebase/test_mmdet/test_object_detection.py
@@ -70,14 +70,14 @@ def backend_model():


def test_init_backend_model(backend_model):
from mmdeploy.codebase.mmdet.deploy.object_detection_model \
import End2EndModel
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
assert isinstance(backend_model, End2EndModel)


def test_can_postprocess_masks():
from mmdeploy.codebase.mmdet.deploy.object_detection_model \
import End2EndModel
from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
End2EndModel
num_dets = [0, 1, 5]
for num_det in num_dets:
det_bboxes = np.random.randn(num_det, 4)
@@ -418,6 +418,7 @@ class TestGetClassesFromCfg:
[data_cfg1, data_cfg2, data_cfg3, data_cfg4])
def test_get_classes_from_cfg(self, cfg):
from mmdet.datasets import DATASETS

from mmdeploy.codebase.mmdet.deploy.object_detection_model import \
get_classes_from_config

@@ -33,8 +33,8 @@ def setup_class(cls):
}})
model_cfg = 'tests/test_codebase/test_mmedit/data/model.py'
model_cfg = load_config(model_cfg)[0]
from mmdeploy.codebase.mmedit.deploy.super_resolution_model\
import End2EndModel
from mmdeploy.codebase.mmedit.deploy.super_resolution_model import \
End2EndModel
cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu',
model_cfg, deploy_cfg)

1 change: 1 addition & 0 deletions tests/test_codebase/test_mmocr/test_mmocr_models.py
@@ -481,6 +481,7 @@ def get_sar_model_cfg(decoder_type: str):
def test_sar_model(backend: Backend, decoder_type):
check_backend(backend)
import os.path as osp

import onnx
from mmocr.models.textrecog import SARNet
sar_cfg = get_sar_model_cfg(decoder_type)
2 changes: 1 addition & 1 deletion tests/test_codebase/test_mmocr/test_text_detection.py
@@ -94,7 +94,7 @@ def test_get_partition_cfg():


def test_build_dataset_and_dataloader():
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import DataLoader, Dataset
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'
8 changes: 4 additions & 4 deletions tests/test_codebase/test_mmocr/test_text_detection_models.py
@@ -39,8 +39,8 @@ def setup_class(cls):
model_cfg_path = 'tests/test_codebase/test_mmocr/data/dbnet.py'
model_cfg = load_config(model_cfg_path)[0]

from mmdeploy.codebase.mmocr.deploy.text_detection_model \
import End2EndModel
from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
End2EndModel
cls.end2end_model = End2EndModel(
Backend.ONNXRUNTIME, [''],
device='cpu',
@@ -96,8 +96,8 @@ def test_build_text_detection_model():
# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmocr.deploy.text_detection_model import \
build_text_detection_model, End2EndModel
from mmdeploy.codebase.mmocr.deploy.text_detection_model import (
End2EndModel, build_text_detection_model)
segmentor = build_text_detection_model([''], model_cfg, deploy_cfg,
'cpu')
assert isinstance(segmentor, End2EndModel)
2 changes: 1 addition & 1 deletion tests/test_codebase/test_mmocr/test_text_recognition.py
@@ -95,7 +95,7 @@ def test_get_partition_cfg():


def test_build_dataset_and_dataloader():
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import DataLoader, Dataset
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'
@@ -36,8 +36,8 @@ def setup_class(cls):
model_cfg_path = 'tests/test_codebase/test_mmocr/data/crnn.py'
model_cfg = load_config(model_cfg_path)[0]

from mmdeploy.codebase.mmocr.deploy.text_recognition_model \
import End2EndModel
from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
End2EndModel
cls.end2end_model = End2EndModel(
Backend.ONNXRUNTIME, [''],
device='cpu',
@@ -94,8 +94,8 @@ def test_build_text_recognition_model():
# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmocr.deploy.text_recognition_model import \
build_text_recognition_model, End2EndModel
from mmdeploy.codebase.mmocr.deploy.text_recognition_model import (
End2EndModel, build_text_recognition_model)
segmentor = build_text_recognition_model([''], model_cfg, deploy_cfg,
'cpu')
assert isinstance(segmentor, End2EndModel)
2 changes: 1 addition & 1 deletion tests/test_codebase/test_mmseg/test_segmentation.py
@@ -95,7 +95,7 @@ def test_get_partition_cfg():


def test_build_dataset_and_dataloader():
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import DataLoader, Dataset
dataset = task_processor.build_dataset(
dataset_cfg=model_cfg, dataset_type='test')
assert isinstance(dataset, Dataset), 'Failed to build dataset'
13 changes: 7 additions & 6 deletions tests/test_codebase/test_mmseg/test_segmentation_model.py
@@ -38,8 +38,8 @@ def setup_class(cls):
'output_names': ['outputs']
}})

from mmdeploy.codebase.mmseg.deploy.segmentation_model \
import End2EndModel
from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
End2EndModel
class_names = ['' for i in range(NUM_CLASS)]
palette = np.random.randint(0, 255, size=(NUM_CLASS, 3))
cls.end2end_model = End2EndModel(
@@ -86,8 +86,9 @@ def test_show_result(self):
@pytest.mark.parametrize('data_type', ['train', 'val', 'test'])
def test_get_classes_palette_from_config(from_file, data_type):
from mmseg.datasets import DATASETS
from mmdeploy.codebase.mmseg.deploy.segmentation_model \
import get_classes_palette_from_config

from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
get_classes_palette_from_config
dataset_type = 'CityscapesDataset'
data_cfg = mmcv.Config({
'data': {
@@ -131,8 +132,8 @@ def test_build_segmentation_model():
# simplify backend inference
with SwitchBackendWrapper(ORTWrapper) as wrapper:
wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg)
from mmdeploy.codebase.mmseg.deploy.segmentation_model import \
build_segmentation_model, End2EndModel
from mmdeploy.codebase.mmseg.deploy.segmentation_model import (
End2EndModel, build_segmentation_model)
segmentor = build_segmentation_model([''], model_cfg, deploy_cfg,
'cpu')
assert isinstance(segmentor, End2EndModel)
3 changes: 2 additions & 1 deletion tests/test_ops/test_ops.py
@@ -619,8 +619,9 @@ def test_gather(backend,

# ncnn mat has implicit batch for mat, the ncnn_output is a mat,
# so the ncnn_outputs has 2 dimensions, not 1.
import onnxruntime
import importlib

import onnxruntime
assert importlib.util.find_spec('onnxruntime') is not None, 'onnxruntime \
not installed.'

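Finally, the sorted state this PR establishes is what the pre-commit hook keeps enforcing; it can also be checked programmatically. A hedged sketch, assuming isort >= 5.10.1:

import isort

# check_code returns True when a snippet is already sorted, printing a diff
# of the required changes otherwise.
ok = isort.check_code("import os\nimport re\n", show_diff=True)
print("already sorted" if ok else "needs isort")

# isort.check_file("setup.py", show_diff=True) is the file-level equivalent;
# picking up the repository's setup.cfg may require passing the settings
# explicitly, which is left as an assumption here.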