Skip to content

Commit

Permalink
[Fix] replace mmcv's functions and modules imported with mmengine's (open-mmlab#8594)
Browse files Browse the repository at this point in the history

* use mmengine's load_state_dict and load_checkpoint

* from mmengine import dump

* from mmengine import FileClient dump list_from_file

* remove redundant registry

* update

* update

* update

* replace _load_checkpoint with CheckpointLoader.load_checkpoint

* changes according to mmcv open-mmlab#2216

* changes due to mmengine open-mmlab#447

* changes due to mmengine open-mmlab#447 and mmcv open-mmlab#2217

* changes due to mmengine open-mmlab#447 and mmcv open-mmlab#2217

* update

* update

* update
  • Loading branch information
chhluo authored Aug 23, 2022
1 parent a8c44e7 commit d0695e6
Show file tree
Hide file tree
Showing 76 changed files with 269 additions and 968 deletions.
11 changes: 6 additions & 5 deletions .dev_scripts/benchmark_inference_fps.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,10 @@
import os
import os.path as osp

import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
from terminaltables import GithubFlavoredMarkdownTable

from tools.analysis_tools.benchmark import repeat_measure_inference_speed
Expand Down Expand Up @@ -164,7 +165,7 @@ def results2markdown(result_dict):
result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

if args.out:
mmcv.mkdir_or_exist(args.out)
mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
mkdir_or_exist(args.out)
dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))

results2markdown(result_dict)
3 changes: 2 additions & 1 deletion .dev_scripts/benchmark_test_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist

from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
Expand Down Expand Up @@ -58,7 +59,7 @@ def inference_model(config_name, checkpoint, visualizer, args, logger=None):
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
mkdir_or_exist(out_dir)

out_file = osp.join(
out_dir,
Expand Down
6 changes: 3 additions & 3 deletions .dev_scripts/download_checkpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
import os.path as osp
from multiprocessing import Pool

import mmcv
import torch
from mmcv import Config
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist


def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
Expand Down Expand Up @@ -52,7 +52,7 @@ def parse_args():

if __name__ == '__main__':
args = parse_args()
mmcv.mkdir_or_exist(args.out)
mkdir_or_exist(args.out)

cfg = Config.fromfile(args.config)

Expand Down
22 changes: 12 additions & 10 deletions .dev_scripts/gather_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
import subprocess
from collections import OrderedDict

import mmcv
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist, scandir


def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
Expand Down Expand Up @@ -49,12 +51,12 @@ def process_checkpoint(in_file, out_file):


def is_by_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
cfg = Config.fromfile('./configs/' + config)
return cfg.runner.type == 'EpochBasedRunner'


def get_final_epoch_or_iter(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
cfg = Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
return cfg.runner.max_epochs
else:
Expand All @@ -71,7 +73,7 @@ def get_best_epoch_or_iter(exp_dir):


def get_real_epoch_or_iter(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
cfg = Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset':
Expand Down Expand Up @@ -142,7 +144,7 @@ def get_dataset_name(config):
WIDERFaceDataset='WIDER Face',
OpenImagesDataset='OpenImagesDataset',
OpenImagesChallengeDataset='OpenImagesChallengeDataset')
cfg = mmcv.Config.fromfile('./configs/' + config)
cfg = Config.fromfile('./configs/' + config)
return name_map[cfg.dataset_type]


Expand Down Expand Up @@ -226,10 +228,10 @@ def main():
args = parse_args()
models_root = args.root
models_out = args.out
mmcv.mkdir_or_exist(models_out)
mkdir_or_exist(models_out)

# find all models in the root directory to be gathered
raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
raw_configs = list(scandir('./configs', '.py', recursive=True))

# filter configs that is not trained in the experiments dir
used_configs = []
Expand Down Expand Up @@ -261,7 +263,7 @@ def main():
log_json_path = list(
sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
cfg = mmcv.Config.fromfile('./configs/' + used_config)
cfg = Config.fromfile('./configs/' + used_config)
results_lut = cfg.evaluation.metric
if not isinstance(results_lut, list):
results_lut = [results_lut]
Expand Down Expand Up @@ -292,7 +294,7 @@ def main():
publish_model_infos = []
for model in model_infos:
model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
mmcv.mkdir_or_exist(model_publish_dir)
mkdir_or_exist(model_publish_dir)

model_name = osp.split(model['config'])[-1].split('.')[0]

Expand Down Expand Up @@ -328,7 +330,7 @@ def main():

models = dict(models=publish_model_infos)
print(f'Totally gathered {len(publish_model_infos)} models')
mmcv.dump(models, osp.join(models_out, 'model_info.json'))
dump(models, osp.join(models_out, 'model_info.json'))

pwc_files = convert_model_info_to_pwc(publish_model_infos)
for name in pwc_files:
Expand Down
12 changes: 6 additions & 6 deletions .dev_scripts/gather_test_benchmark_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@
import glob
import os.path as osp

import mmcv
from mmcv import Config
from mmengine.config import Config
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist


def parse_args():
Expand Down Expand Up @@ -49,7 +50,7 @@ def parse_args():
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]

metric = mmcv.load(log_json_path)
metric = load(log_json_path)
if config in metric.get('config', {}):

new_metrics = dict()
Expand Down Expand Up @@ -86,9 +87,8 @@ def parse_args():
print(f'{config} not exist dir: {metric_json_dir}')

if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
mkdir_or_exist(metrics_out)
dump(result_dict, osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
Expand Down
11 changes: 6 additions & 5 deletions .dev_scripts/gather_train_benchmark_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@
import glob
import os.path as osp

import mmcv
from gather_models import get_final_results
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist

try:
import xlrd
Expand Down Expand Up @@ -78,7 +80,7 @@ def parse_args():
result_path = osp.join(root_path, config_name)
if osp.exists(result_path):
# 1 read config
cfg = mmcv.Config.fromfile(config)
cfg = Config.fromfile(config)
total_epochs = cfg.runner.max_epochs
final_results = cfg.evaluation.metric
if not isinstance(final_results, list):
Expand Down Expand Up @@ -136,9 +138,8 @@ def parse_args():

# 4 save or print results
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'model_metric_info.json'))
mkdir_or_exist(metrics_out)
dump(result_dict, osp.join(metrics_out, 'model_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
Expand Down
12 changes: 6 additions & 6 deletions .dev_scripts/test_init_backbone.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,9 @@
from os.path import dirname, exists, join

import pytest
from mmcv import Config, ProgressBar
from mmcv.runner import _load_checkpoint
from mmengine.config import Config
from mmengine.runner import CheckpointLoader
from mmengine.utils import ProgressBar

from mmdet.models import build_detector

Expand All @@ -28,7 +29,6 @@ def _get_config_directory():

def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
Expand Down Expand Up @@ -91,8 +91,8 @@ def _check_backbone(config, print_cfg=True):
"""Check out backbone whether successfully load pretrained model, by using
`backbone.init_cfg`.
First, using `mmcv._load_checkpoint` to load the checkpoint without
loading models.
First, using `CheckpointLoader.load_checkpoint` to load the checkpoint
without loading models.
Then, using `build_detector` to build models, and using
`model.init_weights()` to initialize the parameters.
Finally, assert weights and bias of each layer loaded from pretrained
Expand Down Expand Up @@ -120,7 +120,7 @@ def _check_backbone(config, print_cfg=True):
if init_cfg is None or init_cfg.get('type') != 'Pretrained':
init_flag = False
if init_flag:
checkpoint = _load_checkpoint(init_cfg.checkpoint)
checkpoint = CheckpointLoader.load_checkpoint(init_cfg.checkpoint)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
Expand Down
3 changes: 2 additions & 1 deletion demo/create_result_gif.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from mmengine.utils import scandir

try:
import imageio
Expand Down Expand Up @@ -80,7 +81,7 @@ def create_frame_by_matplotlib(image_dir,

images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
images_list.append(scandir(osp.join(image_dir, dir_names)))

frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
Expand Down
3 changes: 2 additions & 1 deletion demo/video_demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress

from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
Expand Down Expand Up @@ -60,7 +61,7 @@ def main():
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))

for frame in mmcv.track_iter_progress(video_reader):
for frame in track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
Expand Down
3 changes: 2 additions & 1 deletion demo/video_gpuaccel_demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress

from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
Expand Down Expand Up @@ -113,7 +114,7 @@ def main():

with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(mmcv.track_iter_progress(video_resize), video_origin)):
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]

Expand Down
5 changes: 4 additions & 1 deletion mmdet/datasets/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,10 @@ def _concat_dataset(cfg, default_args=None):

# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import ClassBalancedDataset, MultiImageMixDataset
from mmengine.dataset import ClassBalancedDataset

from .dataset_wrappers import MultiImageMixDataset

if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
Expand Down
27 changes: 13 additions & 14 deletions mmdet/datasets/transforms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
Expand All @@ -19,16 +18,16 @@
from .wrappers import MultiBranch, RandomOrder

__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch', 'RandomErasing', 'LoadEmptyAnnotations', 'RandomOrder'
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder'
]
2 changes: 1 addition & 1 deletion mmdet/datasets/transforms/augment_wrappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
from typing import List, Optional, Union

import numpy as np
from mmcv import ConfigDict
from mmcv.transforms import RandomChoice
from mmcv.transforms.utils import cache_randomness
from mmengine.config import ConfigDict

from mmdet.registry import TRANSFORMS

Expand Down
Loading

0 comments on commit d0695e6

Please sign in to comment.