From 7901eed18b30c1066bf5d0123679c1fc03fb7cc5 Mon Sep 17 00:00:00 2001
From: daquexian
Date: Mon, 1 Aug 2022 10:09:57 +0800
Subject: [PATCH 01/23] Upgrade onnxsim to at least 0.4.0 (#8383)

* Upgrade onnxsim to 0.4.0

* Update pytorch2onnx.py

* Update pytorch2onnx.py
---
 tools/deployment/pytorch2onnx.py | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/tools/deployment/pytorch2onnx.py b/tools/deployment/pytorch2onnx.py
index 5c786f8540e..ee856ccb646 100644
--- a/tools/deployment/pytorch2onnx.py
+++ b/tools/deployment/pytorch2onnx.py
@@ -101,31 +101,17 @@ def pytorch2onnx(model,

     model.forward = origin_forward

-    # get the custom op path
-    ort_custom_op_path = ''
-    try:
-        from mmcv.ops import get_onnxruntime_op_path
-        ort_custom_op_path = get_onnxruntime_op_path()
-    except (ImportError, ModuleNotFoundError):
-        warnings.warn('If input model has custom op from mmcv, \
-            you may have to build mmcv with ONNXRuntime from source.')
-
     if do_simplify:
         import onnxsim

        from mmdet import digit_version

-        min_required_version = '0.3.0'
+        min_required_version = '0.4.0'
         assert digit_version(onnxsim.__version__) >= digit_version(
             min_required_version
-        ), f'Requires to install onnx-simplify>={min_required_version}'
+        ), f'Requires to install onnxsim>={min_required_version}'

-        input_dic = {'input': img_list[0].detach().cpu().numpy()}
-        model_opt, check_ok = onnxsim.simplify(
-            output_file,
-            input_data=input_dic,
-            custom_lib=ort_custom_op_path,
-            dynamic_input_shape=dynamic_export)
+        model_opt, check_ok = onnxsim.simplify(output_file)
         if check_ok:
             onnx.save(model_opt, output_file)
             print(f'Successfully simplified ONNX model: {output_file}')

From af145e98f21bbcdd79e40f54f50d3cd936318297 Mon Sep 17 00:00:00 2001
From: lyq10085 <52818035+lyq10085@users.noreply.github.com>
Date: Mon, 1 Aug 2022 10:11:11 +0800
Subject: [PATCH 02/23] Add .github/workflow/stale.yml (#8445)

* Add .github/workflow/stale.yml

* modify prompt message in stale.yml

* modify check strategy: now issues and PRs with any of the 'invalid' or
  'awaiting response' labels will be checked
---
 .github/workflows/stale.yml | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 .github/workflows/stale.yml

diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000000..b3506c7a698
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,25 @@
+name: 'Close stale issues and PRs'
+
+on:
+  schedule:
+    # check issues and pull requests once every day
+    - cron: '25 11 * * *'
+
+jobs:
+  invalid-stale-close:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v4
+        with:
+          stale-issue-message: 'This issue is marked as stale because it has been marked as invalid or awaiting response for 7 days without any further response. It will be closed in 5 days if the stale label is not removed or if there is no further response.'
+          stale-pr-message: 'This PR is marked as stale because there has been no activity in the past 45 days. It will be closed in 10 days if the stale label is not removed or if there are no further updates.'
+          close-issue-message: 'This issue is closed because it has been stale for 5 days. Please open a new issue if you have a similar issue or any new updates now.'
+          close-pr-message: 'This PR is closed because it has been stale for 10 days. Please reopen this PR if you have any updates and want to keep contributing the code.'
+          # only issues/PRs with any of the 'invalid' or 'awaiting response' labels are checked
+          any-of-labels: 'invalid, awaiting response'
+          days-before-issue-stale: 7
+          days-before-pr-stale: 45
+          days-before-issue-close: 5
+          days-before-pr-close: 10
+          # automatically remove the stale label when the issues or the pull requests are updated or commented on
+          remove-stale-when-updated: true

From 0a083c8a296c4714c2502c1e92073a1bf148fb04 Mon Sep 17 00:00:00 2001
From: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
Date: Fri, 22 Jul 2022 15:16:08 +0800
Subject: [PATCH 03/23] [Doc]: fix markdown version (#8408)

---
 requirements/docs.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements/docs.txt b/requirements/docs.txt
index d251554cb4e..5a96c2a2303 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -1,4 +1,5 @@
 docutils==0.16.0
+markdown<3.4.0
 myst-parser
 -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
 sphinx==4.0.2

From cd6f56bcb936ac98d379e15c1f1e65c467472f16 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Furkan=20BA=C4=9ECI?=
Date: Wed, 3 Aug 2022 20:17:29 +0300
Subject: [PATCH 04/23] Fix typo in HTC link (#8487)

Fix HTC link
---
 configs/htc/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/htc/README.md b/configs/htc/README.md
index 03a89aaa899..747f8f60830 100644
--- a/configs/htc/README.md
+++ b/configs/htc/README.md
@@ -1,6 +1,6 @@
 # HTC

-> [Hybrid Task Cascade for Instance Segmentation](ttps://arxiv.org/abs/1901.07518)
+> [Hybrid Task Cascade for Instance Segmentation](https://arxiv.org/abs/1901.07518)

From ea5c587f0253e8e24c26f1081b5f8707c2fc18d5 Mon Sep 17 00:00:00 2001
From: James
Date: Tue, 9 Aug 2022 13:48:44 +0800
Subject: [PATCH 05/23] Fix DyDCNv2 RuntimeError (#8485)

If the offset parameter is not contiguous, it will trigger the runtime
error: offset must be contiguous
---
 mmdet/models/necks/dyhead.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mmdet/models/necks/dyhead.py b/mmdet/models/necks/dyhead.py
index d20dd1cfd41..649bb4ca2f4 100644
--- a/mmdet/models/necks/dyhead.py
+++ b/mmdet/models/necks/dyhead.py
@@ -44,7 +44,7 @@ def __init__(self,

     def forward(self, x, offset, mask):
         """Forward function."""
-        x = self.conv(x.contiguous(), offset, mask)
+        x = self.conv(x.contiguous(), offset.contiguous(), mask)
         if self.with_norm:
             x = self.norm(x)
         return x

From 2a59519343576a6a8692d2589ff02aad8b1f3212 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Furkan=20BA=C4=9ECI?=
Date: Tue, 9 Aug 2022 08:51:40 +0300
Subject: [PATCH 06/23] Fix method doc (#8512)

---
 mmdet/core/bbox/iou_calculators/iou2d_calculator.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
index 4656d619842..b71a5557ea1 100644
--- a/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
+++ b/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
@@ -33,10 +33,9 @@ def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):

         Args:
             bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
                 format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
-            bboxes2 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
-                format, shape (m, 5) in <x1, y1, x2, y2, score> format, or be
-                empty. If ``is_aligned `` is ``True``, then m and n must be
-                equal.
+            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
+                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
+                empty.
mode (str): "iou" (intersection over union), "iof" (intersection over foreground), or "giou" (generalized intersection over union). From e26c75a78fc55ce9cb09ae758c6d8b68865a2162 Mon Sep 17 00:00:00 2001 From: FAThomson Date: Fri, 12 Aug 2022 12:55:16 +0200 Subject: [PATCH 07/23] Add tuple support in formatting results (#8549) --- tools/analysis_tools/analyze_results.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/analysis_tools/analyze_results.py b/tools/analysis_tools/analyze_results.py index 99881443538..4d8b60c96da 100644 --- a/tools/analysis_tools/analyze_results.py +++ b/tools/analysis_tools/analyze_results.py @@ -167,10 +167,14 @@ def evaluate_and_show(self, elif isinstance(results[0], list): good_samples, bad_samples = self.detection_evaluate( dataset, results, topk=topk) + elif isinstance(results[0], tuple): + results_ = [result[0] for result in results] + good_samples, bad_samples = self.detection_evaluate( + dataset, results_, topk=topk) else: raise 'The format of result is not supported yet. ' \ 'Current dict for panoptic segmentation and list ' \ - 'for object detection are supported.' + 'or tuple for object detection are supported.' good_dir = osp.abspath(osp.join(show_dir, 'good')) bad_dir = osp.abspath(osp.join(show_dir, 'bad')) From 16feb4437167d831b48251f99aafea0395cd79be Mon Sep 17 00:00:00 2001 From: Shunchi Zhang Date: Wed, 17 Aug 2022 14:07:20 +0800 Subject: [PATCH 08/23] Added missed Chinese tutorial link (#8563) (#8564) --- docs/zh_cn/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst index f0e00c984cb..af96fac2b65 100644 --- a/docs/zh_cn/index.rst +++ b/docs/zh_cn/index.rst @@ -15,6 +15,7 @@ Welcome to MMDetection's documentation! 1_exist_data_model.md 2_new_data_model.md + 3_exist_data_new_model.md .. 
toctree:: :maxdepth: 2 From c1154e933679c8080ab98b65152556cb9d6e9c37 Mon Sep 17 00:00:00 2001 From: ceasona <37234316+ceasona@users.noreply.github.com> Date: Fri, 19 Aug 2022 11:41:51 +0800 Subject: [PATCH 09/23] [fix]: fix repeated import of CascadeRPNHead (#8578) --- mmdet/models/dense_heads/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mmdet/models/dense_heads/__init__.py b/mmdet/models/dense_heads/__init__.py index bc26ca3a915..1c2286996e7 100644 --- a/mmdet/models/dense_heads/__init__.py +++ b/mmdet/models/dense_heads/__init__.py @@ -50,9 +50,9 @@ 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', - 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead', - 'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', - 'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', + 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', + 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead', + 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', 'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'SOLOV2Head', 'DDODHead' ] From ed1b42d3cf834849c1159af72a1f903a4951b5a1 Mon Sep 17 00:00:00 2001 From: Kaixin Li Date: Tue, 23 Aug 2022 11:53:10 +0800 Subject: [PATCH 10/23] [Doc]: Fix mistakes in formula (#8607) --- mmdet/models/utils/gaussian_target.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdet/models/utils/gaussian_target.py b/mmdet/models/utils/gaussian_target.py index 5bf4d558ce0..9997d3b13a9 100644 --- a/mmdet/models/utils/gaussian_target.py +++ b/mmdet/models/utils/gaussian_target.py @@ -104,7 +104,7 @@ def gaussian_radius(det_size, min_overlap): .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ - {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} + {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. @@ -128,7 +128,7 @@ def gaussian_radius(det_size, min_overlap): .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ - {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} + {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. 
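
As a numeric cross-check of the corrected formulas, here is a small
standalone sketch (illustrative only, not the mmdet implementation) that
solves the three documented quadratics and keeps the smallest radius, as
`gaussian_radius` does; the Case 3 coefficients follow the same derivation
pattern as the two cases shown above:

```python
import math


def gaussian_radius_sketch(height, width, min_overlap=0.7):
    """Solve the three docstring quadratics; ``min_overlap`` plays iou."""
    # Case 1: one corner inside, one corner outside the gt box:
    # r^2 - (w + h) * r + (1 - iou) / (1 + iou) * w * h >= 0
    b1 = -(height + width)
    c1 = (1 - min_overlap) / (1 + min_overlap) * height * width
    r1 = (-b1 - math.sqrt(b1**2 - 4 * c1)) / 2
    # Case 2: both corners inside the gt box:
    # 4 * r^2 - 2 * (w + h) * r + (1 - iou) * w * h >= 0
    b2 = -2 * (height + width)
    c2 = (1 - min_overlap) * height * width
    r2 = (-b2 - math.sqrt(b2**2 - 16 * c2)) / 8
    # Case 3: both corners outside the gt box:
    # 4 * iou * r^2 + 2 * iou * (w + h) * r + (iou - 1) * w * h <= 0
    a3 = 4 * min_overlap
    b3 = 2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * height * width
    r3 = (-b3 + math.sqrt(b3**2 - 4 * a3 * c3)) / (2 * a3)
    return min(r1, r2, r3)


print(round(gaussian_radius_sketch(10, 10), 2))  # ~0.82 for a 10x10 box
```
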
From 2f249ff64488275b079dc2bfc9adbd67aabe6028 Mon Sep 17 00:00:00 2001 From: Norman Mu Date: Mon, 22 Aug 2022 21:01:58 -0700 Subject: [PATCH 11/23] [fix]: Fix swin backbone absolute pos_embed (#8127) * Fix swin backbone absolute pos_embed resizing * fix lint * fix lint * add unit test * Update swin.py Co-authored-by: Cedric Luo --- mmdet/models/backbones/swin.py | 15 ++++++++++++--- tests/test_models/test_backbones/test_swin.py | 5 +++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/mmdet/models/backbones/swin.py b/mmdet/models/backbones/swin.py index efbd586fe91..176a562a09f 100644 --- a/mmdet/models/backbones/swin.py +++ b/mmdet/models/backbones/swin.py @@ -588,9 +588,8 @@ def __init__(self, if self.use_abs_pos_embed: patch_row = pretrain_img_size[0] // patch_size patch_col = pretrain_img_size[1] // patch_size - num_patches = patch_row * patch_col self.absolute_pos_embed = nn.Parameter( - torch.zeros((1, num_patches, embed_dims))) + torch.zeros((1, embed_dims, patch_row, patch_col))) self.drop_after_pos = nn.Dropout(p=drop_rate) @@ -746,7 +745,17 @@ def forward(self, x): x, hw_shape = self.patch_embed(x) if self.use_abs_pos_embed: - x = x + self.absolute_pos_embed + h, w = self.absolute_pos_embed.shape[1:3] + if hw_shape[0] != h or hw_shape[1] != w: + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, + size=hw_shape, + mode='bicubic', + align_corners=False).flatten(2).transpose(1, 2) + else: + absolute_pos_embed = self.absolute_pos_embed.flatten( + 2).transpose(1, 2) + x = x + absolute_pos_embed x = self.drop_after_pos(x) outs = [] diff --git a/tests/test_models/test_backbones/test_swin.py b/tests/test_models/test_backbones/test_swin.py index 9d6420cf8e3..5369ef2f3c7 100644 --- a/tests/test_models/test_backbones/test_swin.py +++ b/tests/test_models/test_backbones/test_swin.py @@ -44,6 +44,11 @@ def test_swin_transformer(): model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True) model.init_weights() model(temp) + # Test different inputs when use absolute position embedding + temp = torch.randn((1, 3, 112, 112)) + model(temp) + temp = torch.randn((1, 3, 256, 256)) + model(temp) # Test patch norm model = SwinTransformer(patch_norm=False) From 8c0577597fa260f2645f51a1b65b2751fb649180 Mon Sep 17 00:00:00 2001 From: Oleg Durygin <43780093+Lehsuby@users.noreply.github.com> Date: Fri, 26 Aug 2022 14:50:19 +0400 Subject: [PATCH 12/23] [Fix]: Fix get train_pipeline method of val workflow (#8575) --- tools/train.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/train.py b/tools/train.py index cff19f037e1..3b325d988e5 100644 --- a/tools/train.py +++ b/tools/train.py @@ -217,8 +217,10 @@ def main(): datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: + assert 'val' in [mode for (mode, _) in cfg.workflow] val_dataset = copy.deepcopy(cfg.data.val) - val_dataset.pipeline = cfg.data.train.pipeline + val_dataset.pipeline = cfg.data.train.get( + 'pipeline', cfg.data.train.dataset.get('pipeline')) datasets.append(build_dataset(val_dataset)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in From df0694cba7d4cd7852017f5433b405d5eab6abe8 Mon Sep 17 00:00:00 2001 From: JiayuXu <84259897+JiayuXu0@users.noreply.github.com> Date: Tue, 30 Aug 2022 17:03:51 +0800 Subject: [PATCH 13/23] Fix floordiv warning. (#8648) * Fix floordiv warning. * Add floordiv wrapper. 
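
A quick illustration of the warning this patch removes (a hedged sketch, not
code from the repo; the actual wrapper is added to `mmdet/utils/misc.py`
below):

```python
import torch

x = torch.tensor([2.7, -2.7])
# Since PyTorch 1.8, tensor `//` emits a deprecation warning because the old
# floor_divide actually truncated toward zero instead of flooring.
# Passing rounding_mode='trunc' keeps that historical behavior, warning-free:
print(torch.div(x, 2, rounding_mode='trunc'))  # tensor([ 1., -1.])
```

The `floordiv` wrapper simply dispatches to `torch.div` when running on
PyTorch 1.8+ and falls back to `//` otherwise.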
--- mmdet/models/dense_heads/solo_head.py | 35 ++++++++++++++++++------- mmdet/models/dense_heads/solov2_head.py | 35 ++++++++++++++++++------- mmdet/utils/misc.py | 15 ++++++++++- 3 files changed, 64 insertions(+), 21 deletions(-) diff --git a/mmdet/models/dense_heads/solo_head.py b/mmdet/models/dense_heads/solo_head.py index 9f5719e9f88..e89aacb420a 100644 --- a/mmdet/models/dense_heads/solo_head.py +++ b/mmdet/models/dense_heads/solo_head.py @@ -9,6 +9,7 @@ from mmdet.core import InstanceData, mask_matrix_nms, multi_apply from mmdet.core.utils import center_of_mass, generate_coordinate from mmdet.models.builder import HEADS, build_loss +from mmdet.utils.misc import floordiv from .base_mask_head import BaseMaskHead @@ -375,27 +376,41 @@ def _get_targets_single(self, center_h, center_w = center_of_mass(gt_mask) coord_w = int( - (center_w / upsampled_size[1]) // (1. / num_grid)) + floordiv((center_w / upsampled_size[1]), (1. / num_grid), + rounding_mode='trunc')) coord_h = int( - (center_h / upsampled_size[0]) // (1. / num_grid)) + floordiv((center_h / upsampled_size[0]), (1. / num_grid), + rounding_mode='trunc')) # left, top, right, down top_box = max( 0, - int(((center_h - pos_h_range) / upsampled_size[0]) // - (1. / num_grid))) + int( + floordiv( + (center_h - pos_h_range) / upsampled_size[0], + (1. / num_grid), + rounding_mode='trunc'))) down_box = min( num_grid - 1, - int(((center_h + pos_h_range) / upsampled_size[0]) // - (1. / num_grid))) + int( + floordiv( + (center_h + pos_h_range) / upsampled_size[0], + (1. / num_grid), + rounding_mode='trunc'))) left_box = max( 0, - int(((center_w - pos_w_range) / upsampled_size[1]) // - (1. / num_grid))) + int( + floordiv( + (center_w - pos_w_range) / upsampled_size[1], + (1. / num_grid), + rounding_mode='trunc'))) right_box = min( num_grid - 1, - int(((center_w + pos_w_range) / upsampled_size[1]) // - (1. / num_grid))) + int( + floordiv( + (center_w + pos_w_range) / upsampled_size[1], + (1. / num_grid), + rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) diff --git a/mmdet/models/dense_heads/solov2_head.py b/mmdet/models/dense_heads/solov2_head.py index 6f2defe0951..9edf99d8c26 100644 --- a/mmdet/models/dense_heads/solov2_head.py +++ b/mmdet/models/dense_heads/solov2_head.py @@ -12,6 +12,7 @@ from mmdet.core import InstanceData, mask_matrix_nms, multi_apply from mmdet.core.utils import center_of_mass, generate_coordinate from mmdet.models.builder import HEADS +from mmdet.utils.misc import floordiv from .solo_head import SOLOHead @@ -382,27 +383,41 @@ def _get_targets_single(self, center_h, center_w = center_of_mass(gt_mask) coord_w = int( - (center_w / upsampled_size[1]) // (1. / num_grid)) + floordiv((center_w / upsampled_size[1]), (1. / num_grid), + rounding_mode='trunc')) coord_h = int( - (center_h / upsampled_size[0]) // (1. / num_grid)) + floordiv((center_h / upsampled_size[0]), (1. / num_grid), + rounding_mode='trunc')) # left, top, right, down top_box = max( 0, - int(((center_h - pos_h_range) / upsampled_size[0]) // - (1. / num_grid))) + int( + floordiv( + (center_h - pos_h_range) / upsampled_size[0], + (1. / num_grid), + rounding_mode='trunc'))) down_box = min( num_grid - 1, - int(((center_h + pos_h_range) / upsampled_size[0]) // - (1. / num_grid))) + int( + floordiv( + (center_h + pos_h_range) / upsampled_size[0], + (1. / num_grid), + rounding_mode='trunc'))) left_box = max( 0, - int(((center_w - pos_w_range) / upsampled_size[1]) // - (1. 
/ num_grid))) + int( + floordiv( + (center_w - pos_w_range) / upsampled_size[1], + (1. / num_grid), + rounding_mode='trunc'))) right_box = min( num_grid - 1, - int(((center_w + pos_w_range) / upsampled_size[1]) // - (1. / num_grid))) + int( + floordiv( + (center_w + pos_w_range) / upsampled_size[1], + (1. / num_grid), + rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) diff --git a/mmdet/utils/misc.py b/mmdet/utils/misc.py index 4113672acfb..2017cbb9466 100644 --- a/mmdet/utils/misc.py +++ b/mmdet/utils/misc.py @@ -5,7 +5,8 @@ import warnings import mmcv -from mmcv.utils import print_log +import torch +from mmcv.utils import TORCH_VERSION, digit_version, print_log def find_latest_checkpoint(path, suffix='pth'): @@ -74,3 +75,15 @@ def update(cfg, src_str, dst_str): update(cfg.data, cfg.data_root, dst_root) cfg.data_root = dst_root + + +_torch_version_div_indexing = ( + 'parrots' not in TORCH_VERSION + and digit_version(TORCH_VERSION) >= digit_version('1.8')) + + +def floordiv(dividend, divisor, rounding_mode='trunc'): + if _torch_version_div_indexing: + return torch.div(dividend, divisor, rounding_mode=rounding_mode) + else: + return dividend // divisor From bc295848ca9966c9b7e5efc82a12d5a445715933 Mon Sep 17 00:00:00 2001 From: AmirMasoud Nourollah <61701369+Nourollah@users.noreply.github.com> Date: Mon, 5 Sep 2022 07:01:51 +0430 Subject: [PATCH 14/23] [Docs] Update Config Doc to Add WandB Hook (#8663) * logger hooks samples updated * [Docs] MMDetWandB LoggerHook Details Added * [Docs] lint test passed --- docs/en/tutorials/config.md | 156 +++++++++++++++++---------------- docs/zh_cn/tutorials/config.md | 155 ++++++++++++++++---------------- 2 files changed, 161 insertions(+), 150 deletions(-) diff --git a/docs/en/tutorials/config.md b/docs/en/tutorials/config.md index 6b232cf5493..9f7759a2079 100644 --- a/docs/en/tutorials/config.md +++ b/docs/en/tutorials/config.md @@ -71,8 +71,8 @@ The `train_cfg` and `test_cfg` are deprecated in config file, please specify the ```python # deprecated model = dict( - type=..., - ... + type=..., + ... ) train_cfg=dict(...) test_cfg=dict(...) @@ -83,10 +83,10 @@ The migration example is as below. ```python # recommended model = dict( - type=..., - ... - train_cfg=dict(...), - test_cfg=dict(...), + type=..., + ... +train_cfg=dict(...), + test_cfg=dict(...), ) ``` @@ -109,8 +109,8 @@ model = dict( type='BN', # Type of norm layer, usually it is BN or GN requires_grad=True), # Whether to train the gamma and beta in BN norm_eval=True, # Whether to freeze the statistics in BN - style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # The ImageNet pretrained backbone to be loaded + style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # The ImageNet pretrained backbone to be loaded neck=dict( type='FPN', # The neck of detector is FPN. We also support 'NASFPN', 'PAFPN', etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10 for more details. 
in_channels=[256, 512, 1024, 2048], # The input channels, this is consistent with the output channels of backbone @@ -182,70 +182,70 @@ model = dict( type='CrossEntropyLoss', # Type of loss used for segmentation use_mask=True, # Whether to only train the mask in the correct class. loss_weight=1.0)))) # Loss weight of mask branch. - train_cfg = dict( # Config of training hyperparameters for rpn and rcnn - rpn=dict( # Training config of rpn - assigner=dict( # Config of assigner - type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for many common detectors. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. - pos_iou_thr=0.7, # IoU >= threshold 0.7 will be taken as positive samples - neg_iou_thr=0.3, # IoU < threshold 0.3 will be taken as negative samples - min_pos_iou=0.3, # The minimal IoU threshold to take boxes as positive samples - match_low_quality=True, # Whether to match the boxes under low quality (see API doc for more details). - ignore_iof_thr=-1), # IoF threshold for ignoring bboxes - sampler=dict( # Config of positive/negative sampler - type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. - num=256, # Number of samples - pos_fraction=0.5, # The ratio of positive samples in the total samples. - neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. - add_gt_as_proposals=False), # Whether add GT as proposals after sampling. - allowed_border=-1, # The border allowed after padding for valid anchors. - pos_weight=-1, # The weight of positive samples during training. - debug=False), # Whether to set the debug mode - rpn_proposal=dict( # The config to generate proposals during training - nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. - nms_pre=2000, # The number of boxes before NMS - nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. - max_per_img=1000, # The number of boxes to be kept after NMS. - nms=dict( # Config of NMS - type='nms', # Type of NMS - iou_threshold=0.7 # NMS threshold - ), - min_bbox_size=0), # The allowed minimal box size - rcnn=dict( # The config for the roi heads. - assigner=dict( # Config of assigner for second stage, this is different for that in rpn - type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for all roi_heads for now. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. - pos_iou_thr=0.5, # IoU >= threshold 0.5 will be taken as positive samples - neg_iou_thr=0.5, # IoU < threshold 0.5 will be taken as negative samples - min_pos_iou=0.5, # The minimal IoU threshold to take boxes as positive samples - match_low_quality=False, # Whether to match the boxes under low quality (see API doc for more details). - ignore_iof_thr=-1), # IoF threshold for ignoring bboxes - sampler=dict( - type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. - num=512, # Number of samples - pos_fraction=0.25, # The ratio of positive samples in the total samples. 
- neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. - add_gt_as_proposals=True - ), # Whether add GT as proposals after sampling. - mask_size=28, # Size of mask - pos_weight=-1, # The weight of positive samples during training. - debug=False)) # Whether to set the debug mode - test_cfg = dict( # Config for testing hyperparameters for rpn and rcnn - rpn=dict( # The config to generate proposals during testing - nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. - nms_pre=1000, # The number of boxes before NMS - nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. - max_per_img=1000, # The number of boxes to be kept after NMS. - nms=dict( # Config of NMS - type='nms', #Type of NMS - iou_threshold=0.7 # NMS threshold - ), - min_bbox_size=0), # The allowed minimal box size - rcnn=dict( # The config for the roi heads. - score_thr=0.05, # Threshold to filter out boxes - nms=dict( # Config of NMS in the second stage - type='nms', # Type of NMS - iou_thr=0.5), # NMS threshold - max_per_img=100, # Max number of detections of each image - mask_thr_binary=0.5)) # Threshold of mask prediction +train_cfg = dict( # Config of training hyperparameters for rpn and rcnn + rpn=dict( # Training config of rpn + assigner=dict( # Config of assigner + type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for many common detectors. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. + pos_iou_thr=0.7, # IoU >= threshold 0.7 will be taken as positive samples + neg_iou_thr=0.3, # IoU < threshold 0.3 will be taken as negative samples + min_pos_iou=0.3, # The minimal IoU threshold to take boxes as positive samples + match_low_quality=True, # Whether to match the boxes under low quality (see API doc for more details). + ignore_iof_thr=-1), # IoF threshold for ignoring bboxes + sampler=dict( # Config of positive/negative sampler + type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. + num=256, # Number of samples + pos_fraction=0.5, # The ratio of positive samples in the total samples. + neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. + add_gt_as_proposals=False), # Whether add GT as proposals after sampling. + allowed_border=-1, # The border allowed after padding for valid anchors. + pos_weight=-1, # The weight of positive samples during training. + debug=False), # Whether to set the debug mode + rpn_proposal=dict( # The config to generate proposals during training + nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. + nms_pre=2000, # The number of boxes before NMS + nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. + max_per_img=1000, # The number of boxes to be kept after NMS. + nms=dict( # Config of NMS + type='nms', # Type of NMS + iou_threshold=0.7 # NMS threshold + ), + min_bbox_size=0), # The allowed minimal box size + rcnn=dict( # The config for the roi heads. 
+ assigner=dict( # Config of assigner for second stage, this is different for that in rpn + type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for all roi_heads for now. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. + pos_iou_thr=0.5, # IoU >= threshold 0.5 will be taken as positive samples + neg_iou_thr=0.5, # IoU < threshold 0.5 will be taken as negative samples + min_pos_iou=0.5, # The minimal IoU threshold to take boxes as positive samples + match_low_quality=False, # Whether to match the boxes under low quality (see API doc for more details). + ignore_iof_thr=-1), # IoF threshold for ignoring bboxes + sampler=dict( + type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. + num=512, # Number of samples + pos_fraction=0.25, # The ratio of positive samples in the total samples. + neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. + add_gt_as_proposals=True + ), # Whether add GT as proposals after sampling. + mask_size=28, # Size of mask + pos_weight=-1, # The weight of positive samples during training. + debug=False)) # Whether to set the debug mode +test_cfg = dict( # Config for testing hyperparameters for rpn and rcnn + rpn=dict( # The config to generate proposals during testing + nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. + nms_pre=1000, # The number of boxes before NMS + nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. + max_per_img=1000, # The number of boxes to be kept after NMS. + nms=dict( # Config of NMS + type='nms', #Type of NMS + iou_threshold=0.7 # NMS threshold + ), + min_bbox_size=0), # The allowed minimal box size + rcnn=dict( # The config for the roi heads. + score_thr=0.05, # Threshold to filter out boxes + nms=dict( # Config of NMS in the second stage + type='nms', # Type of NMS + iou_thr=0.5), # NMS threshold + max_per_img=100, # Max number of detections of each image + mask_thr_binary=0.5)) # Threshold of mask prediction dataset_type = 'CocoDataset' # Dataset type, this will be used to define the dataset data_root = 'data/coco/' # Root path of data img_norm_cfg = dict( # Image normalization config to normalize the input images @@ -381,7 +381,7 @@ data = dict( ]) ], samples_per_gpu=2 # Batch size of a single GPU used in testing - )) + )) evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details. interval=1, # Evaluation interval metric=['bbox', 'segm']) # Metrics used during evaluation @@ -407,9 +407,15 @@ checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https:/ log_config = dict( # config to register logger hook interval=50, # Interval to print the log hooks=[ - # dict(type='TensorboardLoggerHook') # The Tensorboard logger is also supported - dict(type='TextLoggerHook') + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook', by_epoch=False), + dict(type='MMDetWandbHook', by_epoch=False, # The Wandb logger is also supported, It requires `wandb` to be installed. 
+ init_kwargs={'entity': "OpenMMLab", # The entity used to log on Wandb + 'project': "MMDet", # Project name in WandB + 'config': cfg_dict}), # Check https://docs.wandb.ai/ref/python/init for more init arguments. + # MMDetWandbHook is mmdet implementation of WandbLoggerHook. ClearMLLoggerHook, DvcliveLoggerHook, MlflowLoggerHook, NeptuneLoggerHook, PaviLoggerHook, SegmindLoggerHook are also supported based on MMCV implementation. ]) # The logger used to record the training process. + dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. log_level = 'INFO' # The level of logging. load_from = None # load models as a pre-trained model from a given path. This will not resume training. diff --git a/docs/zh_cn/tutorials/config.md b/docs/zh_cn/tutorials/config.md index 42b098f63b3..40460878006 100644 --- a/docs/zh_cn/tutorials/config.md +++ b/docs/zh_cn/tutorials/config.md @@ -56,8 +56,8 @@ ```python # 已经弃用的形式 model = dict( - type=..., - ... + type=..., + ... ) train_cfg=dict(...) test_cfg=dict(...) @@ -68,10 +68,10 @@ test_cfg=dict(...) ```python # 推荐的形式 model = dict( - type=..., - ... - train_cfg=dict(...), - test_cfg=dict(...), + type=..., + ... +train_cfg=dict(...), + test_cfg=dict(...), ) ``` @@ -93,7 +93,7 @@ model = dict( requires_grad=True), # 是否训练归一化里的 gamma 和 beta。 norm_eval=True, # 是否冻结 BN 里的统计项。 style='pytorch', # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积。 - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # 加载通过 ImageNet 预训练的模型 + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # 加载通过 ImageNet 预训练的模型 neck=dict( type='FPN', # 检测器的 neck 是 FPN,我们同样支持 'NASFPN', 'PAFPN' 等,更多细节可以参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10。 in_channels=[256, 512, 1024, 2048], # 输入通道数,这与主干网络的输出通道一致 @@ -165,70 +165,70 @@ model = dict( type='CrossEntropyLoss', # 用于分割的损失类型。 use_mask=True, # 是否只在正确的类中训练 mask。 loss_weight=1.0)))) # mask 分支的损失权重. - train_cfg = dict( # rpn 和 rcnn 训练超参数的配置 - rpn=dict( # rpn 的训练配置 - assigner=dict( # 分配器(assigner)的配置 - type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 用于许多常见的检测器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 - pos_iou_thr=0.7, # IoU >= 0.7(阈值) 被视为正样本。 - neg_iou_thr=0.3, # IoU < 0.3(阈值) 被视为负样本。 - min_pos_iou=0.3, # 将框作为正样本的最小 IoU 阈值。 - match_low_quality=True, # 是否匹配低质量的框(更多细节见 API 文档). 
- ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值。 - sampler=dict( # 正/负采样器(sampler)的配置 - type='RandomSampler', # 采样器类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 - num=256, # 样本数量。 - pos_fraction=0.5, # 正样本占总样本的比例。 - neg_pos_ub=-1, # 基于正样本数量的负样本上限。 - add_gt_as_proposals=False), # 采样后是否添加 GT 作为 proposal。 - allowed_border=-1, # 填充有效锚点后允许的边框。 - pos_weight=-1, # 训练期间正样本的权重。 - debug=False), # 是否设置调试(debug)模式 - rpn_proposal=dict( # 在训练期间生成 proposals 的配置 - nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于 `GARPNHead` ,naive rpn 不支持 nms cross levels。 - nms_pre=2000, # NMS 前的 box 数 - nms_post=1000, # NMS 要保留的 box 的数量,只在 GARPNHHead 中起作用。 - max_per_img=1000, # NMS 后要保留的 box 数量。 - nms=dict( # NMS 的配置 - type='nms', # NMS 的类别 - iou_threshold=0.7 # NMS 的阈值 - ), - min_bbox_size=0), # 允许的最小 box 尺寸 - rcnn=dict( # roi head 的配置。 - assigner=dict( # 第二阶段分配器的配置,这与 rpn 中的不同 - type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 目前用于所有 roi_heads。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 - pos_iou_thr=0.5, # IoU >= 0.5(阈值)被认为是正样本。 - neg_iou_thr=0.5, # IoU < 0.5(阈值)被认为是负样本。 - min_pos_iou=0.5, # 将 box 作为正样本的最小 IoU 阈值 - match_low_quality=False, # 是否匹配低质量下的 box(有关更多详细信息,请参阅 API 文档)。 - ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值 - sampler=dict( - type='RandomSampler', #采样器的类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 - num=512, # 样本数量 - pos_fraction=0.25, # 正样本占总样本的比例。. - neg_pos_ub=-1, # 基于正样本数量的负样本上限。. - add_gt_as_proposals=True - ), # 采样后是否添加 GT 作为 proposal。 - mask_size=28, # mask 的大小 - pos_weight=-1, # 训练期间正样本的权重。 - debug=False)) # 是否设置调试模式。 - test_cfg = dict( # 用于测试 rpn 和 rcnn 超参数的配置 - rpn=dict( # 测试阶段生成 proposals 的配置 - nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于`GARPNHead`,naive rpn 不支持做 NMS cross levels。 - nms_pre=1000, # NMS 前的 box 数 - nms_post=1000, # NMS 要保留的 box 的数量,只在`GARPNHHead`中起作用。 - max_per_img=1000, # NMS 后要保留的 box 数量 - nms=dict( # NMS 的配置 - type='nms', # NMS 的类型 - iou_threshold=0.7 # NMS 阈值 - ), - min_bbox_size=0), # box 允许的最小尺寸 - rcnn=dict( # roi heads 的配置 - score_thr=0.05, # bbox 的分数阈值 - nms=dict( # 第二步的 NMS 配置 - type='nms', # NMS 的类型 - iou_thr=0.5), # NMS 的阈值 - max_per_img=100, # 每张图像的最大检测次数 - mask_thr_binary=0.5)) # mask 预处的阈值 +train_cfg = dict( # rpn 和 rcnn 训练超参数的配置 + rpn=dict( # rpn 的训练配置 + assigner=dict( # 分配器(assigner)的配置 + type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 用于许多常见的检测器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 + pos_iou_thr=0.7, # IoU >= 0.7(阈值) 被视为正样本。 + neg_iou_thr=0.3, # IoU < 0.3(阈值) 被视为负样本。 + min_pos_iou=0.3, # 将框作为正样本的最小 IoU 阈值。 + match_low_quality=True, # 是否匹配低质量的框(更多细节见 API 文档). 
+ ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值。 + sampler=dict( # 正/负采样器(sampler)的配置 + type='RandomSampler', # 采样器类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 + num=256, # 样本数量。 + pos_fraction=0.5, # 正样本占总样本的比例。 + neg_pos_ub=-1, # 基于正样本数量的负样本上限。 + add_gt_as_proposals=False), # 采样后是否添加 GT 作为 proposal。 + allowed_border=-1, # 填充有效锚点后允许的边框。 + pos_weight=-1, # 训练期间正样本的权重。 + debug=False), # 是否设置调试(debug)模式 + rpn_proposal=dict( # 在训练期间生成 proposals 的配置 + nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于 `GARPNHead` ,naive rpn 不支持 nms cross levels。 + nms_pre=2000, # NMS 前的 box 数 + nms_post=1000, # NMS 要保留的 box 的数量,只在 GARPNHHead 中起作用。 + max_per_img=1000, # NMS 后要保留的 box 数量。 + nms=dict( # NMS 的配置 + type='nms', # NMS 的类别 + iou_threshold=0.7 # NMS 的阈值 + ), + min_bbox_size=0), # 允许的最小 box 尺寸 + rcnn=dict( # roi head 的配置。 + assigner=dict( # 第二阶段分配器的配置,这与 rpn 中的不同 + type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 目前用于所有 roi_heads。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 + pos_iou_thr=0.5, # IoU >= 0.5(阈值)被认为是正样本。 + neg_iou_thr=0.5, # IoU < 0.5(阈值)被认为是负样本。 + min_pos_iou=0.5, # 将 box 作为正样本的最小 IoU 阈值 + match_low_quality=False, # 是否匹配低质量下的 box(有关更多详细信息,请参阅 API 文档)。 + ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值 + sampler=dict( + type='RandomSampler', #采样器的类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 + num=512, # 样本数量 + pos_fraction=0.25, # 正样本占总样本的比例。. + neg_pos_ub=-1, # 基于正样本数量的负样本上限。. + add_gt_as_proposals=True + ), # 采样后是否添加 GT 作为 proposal。 + mask_size=28, # mask 的大小 + pos_weight=-1, # 训练期间正样本的权重。 + debug=False)) # 是否设置调试模式。 +test_cfg = dict( # 用于测试 rpn 和 rcnn 超参数的配置 + rpn=dict( # 测试阶段生成 proposals 的配置 + nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于`GARPNHead`,naive rpn 不支持做 NMS cross levels。 + nms_pre=1000, # NMS 前的 box 数 + nms_post=1000, # NMS 要保留的 box 的数量,只在`GARPNHHead`中起作用。 + max_per_img=1000, # NMS 后要保留的 box 数量 + nms=dict( # NMS 的配置 + type='nms', # NMS 的类型 + iou_threshold=0.7 # NMS 阈值 + ), + min_bbox_size=0), # box 允许的最小尺寸 + rcnn=dict( # roi heads 的配置 + score_thr=0.05, # bbox 的分数阈值 + nms=dict( # 第二步的 NMS 配置 + type='nms', # NMS 的类型 + iou_thr=0.5), # NMS 的阈值 + max_per_img=100, # 每张图像的最大检测次数 + mask_thr_binary=0.5)) # mask 预处的阈值 dataset_type = 'CocoDataset' # 数据集类型,这将被用来定义数据集。 data_root = 'data/coco/' # 数据的根路径。 img_norm_cfg = dict( # 图像归一化配置,用来归一化输入的图像。 @@ -364,7 +364,7 @@ data = dict( ]) ], samples_per_gpu=2 # 单个 GPU 测试时的 Batch size - )) + )) evaluation = dict( # evaluation hook 的配置,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7。 interval=1, # 验证的间隔。 metric=['bbox', 'segm']) # 验证期间使用的指标。 @@ -389,10 +389,15 @@ checkpoint_config = dict( # Checkpoint hook 的配置文件。执行时请参 interval=1) # 保存的间隔是 1。 log_config = dict( # register logger hook 的配置文件。 interval=50, # 打印日志的间隔 - hooks=[ - # dict(type='TensorboardLoggerHook') # 同样支持 Tensorboard 日志 - dict(type='TextLoggerHook') + hooks=[ # 训练期间执行的钩子 + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook', by_epoch=False), + dict(type='MMDetWandbHook', by_epoch=False, # 还支持 Wandb 记录器,它需要安装 `wandb`。 + init_kwargs={'entity': "OpenMMLab", # 用于登录wandb的实体 + 'project': "MMDet", # WandB中的项目名称 + 'config': cfg_dict}), # 检查 https://docs.wandb.ai/ref/python/init 以获取更多初始化参数 ]) # 用于记录训练过程的记录器(logger)。 + dist_params = dict(backend='nccl') # 用于设置分布式训练的参数,端口也同样可被设置。 
log_level = 'INFO' # 日志的级别。 load_from = None # 从一个给定路径里加载模型作为预训练模型,它并不会消耗训练时间。 From d83266eaa60fd96227c72a1f3545280a918922c9 Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 10:46:10 +0800 Subject: [PATCH 15/23] fix: delete asserts of loss and assigner coef checking --- mmdet/models/dense_heads/detr_head.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/mmdet/models/dense_heads/detr_head.py b/mmdet/models/dense_heads/detr_head.py index de1913c9db1..6b7ee13cb68 100644 --- a/mmdet/models/dense_heads/detr_head.py +++ b/mmdet/models/dense_heads/detr_head.py @@ -107,15 +107,6 @@ def __init__(self, assert 'assigner' in train_cfg, 'assigner should be provided '\ 'when train_cfg is set.' assigner = train_cfg['assigner'] - assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ - 'The classification weight for loss and matcher should be' \ - 'exactly the same.' - assert loss_bbox['loss_weight'] == assigner['reg_cost'][ - 'weight'], 'The regression L1 weight for loss and matcher ' \ - 'should be exactly the same.' - assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ - 'The regression iou weight for loss and matcher should be' \ - 'exactly the same.' self.assigner = build_assigner(assigner) # DETR sampling=False, so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') From baa7b8c36bd60a360123b4340fbc13475e853be0 Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 14:06:41 +0800 Subject: [PATCH 16/23] refactor: refactor detr-related modules in mmdet/models/utils/transformer.py --- mmdet/models/utils/transformer.py | 854 ++++++++---------------------- 1 file changed, 227 insertions(+), 627 deletions(-) diff --git a/mmdet/models/utils/transformer.py b/mmdet/models/utils/transformer.py index 3c390c83a1a..41579f26cff 100644 --- a/mmdet/models/utils/transformer.py +++ b/mmdet/models/utils/transformer.py @@ -1,33 +1,19 @@ # Copyright (c) OpenMMLab. All rights reserved. +import copy import math -import warnings from typing import Sequence import torch import torch.nn as nn import torch.nn.functional as F -from mmcv.cnn import (build_activation_layer, build_conv_layer, - build_norm_layer, xavier_init) -from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER, - TRANSFORMER_LAYER_SEQUENCE) -from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, - TransformerLayerSequence, - build_transformer_layer_sequence) +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmcv.runner.base_module import BaseModule from mmcv.utils import to_2tuple -from torch.nn.init import normal_ +from torch.nn import ModuleList from mmdet.models.utils.builder import TRANSFORMER -try: - from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention - -except ImportError: - warnings.warn( - '`MultiScaleDeformableAttention` in MMCV has been moved to ' - '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') - from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention - def nlc_to_nchw(x, hw_shape): """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. @@ -404,659 +390,273 @@ def inverse_sigmoid(x, eps=1e-5): return torch.log(x1 / x2) -@TRANSFORMER_LAYER.register_module() -class DetrTransformerDecoderLayer(BaseTransformerLayer): - """Implements decoder layer in DETR transformer. 
- - Args: - attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): - Configs for self_attention or cross_attention, the order - should be consistent with it in `operation_order`. If it is - a dict, it would be expand to the number of attention in - `operation_order`. - feedforward_channels (int): The hidden dimension for FFNs. - ffn_dropout (float): Probability of an element to be zeroed - in ffn. Default 0.0. - operation_order (tuple[str]): The execution order of operation - in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). - Default:None - act_cfg (dict): The activation config for FFNs. Default: `LN` - norm_cfg (dict): Config dict for normalization layer. - Default: `LN`. - ffn_num_fcs (int): The number of fully-connected layers in FFNs. - Default:2. - """ +class DetrTransformerEncoder(BaseModule): def __init__(self, - attn_cfgs, - feedforward_channels, - ffn_dropout=0.0, - operation_order=None, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN'), - ffn_num_fcs=2, - **kwargs): - super(DetrTransformerDecoderLayer, self).__init__( - attn_cfgs=attn_cfgs, - feedforward_channels=feedforward_channels, - ffn_dropout=ffn_dropout, - operation_order=operation_order, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - ffn_num_fcs=ffn_num_fcs, - **kwargs) - assert len(operation_order) == 6 - assert set(operation_order) == set( - ['self_attn', 'norm', 'cross_attn', 'ffn']) - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DetrTransformerEncoder(TransformerLayerSequence): - """TransformerEncoder of DETR. - - Args: - post_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. Only used when `self.pre_norm` is `True` - """ + layers_cfg=None, + num_layers=None, + post_norm_cfg=dict(type='LN'), + init_cfg=None): - def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): - super(DetrTransformerEncoder, self).__init__(*args, **kwargs) - if post_norm_cfg is not None: - self.post_norm = build_norm_layer( - post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None + super().__init__(init_cfg) + if isinstance(layers_cfg, dict): + layers_cfg = [copy.deepcopy(layers_cfg) for _ in range(num_layers)] else: - assert not self.pre_norm, f'Use prenorm in ' \ - f'{self.__class__.__name__},' \ - f'Please specify post_norm_cfg' - self.post_norm = None - - def forward(self, *args, **kwargs): - """Forward function for `TransformerCoder`. + assert isinstance(layers_cfg, list) and \ + len(layers_cfg) == num_layers # TODO + self.layers_cfg = layers_cfg # TODO + self.post_norm_cfg = post_norm_cfg + self.num_layers = num_layers + self._init_layers() + self.embed_dims = self.layers[0].embed_dims # TODO + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] + + def _init_layers(self): + self.layers = ModuleList() + for i in range(self.num_layers): + self.layers.append( + DetrTransformerEncoderLayer(**self.layers_cfg[i])) - Returns: - Tensor: forwarded results with shape [num_query, bs, embed_dims]. 
- """ - x = super(DetrTransformerEncoder, self).forward(*args, **kwargs) + def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + for layer in self.layers: + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) if self.post_norm is not None: - x = self.post_norm(x) - return x + query = self.post_norm(query) + return query -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DetrTransformerDecoder(TransformerLayerSequence): - """Implements the decoder in DETR transformer. - - Args: - return_intermediate (bool): Whether to return intermediate outputs. - post_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. - """ +class DetrTransformerDecoder(BaseModule): def __init__(self, - *args, + layers_cfg=None, + num_layers=None, post_norm_cfg=dict(type='LN'), - return_intermediate=False, - **kwargs): - - super(DetrTransformerDecoder, self).__init__(*args, **kwargs) - self.return_intermediate = return_intermediate - if post_norm_cfg is not None: - self.post_norm = build_norm_layer(post_norm_cfg, - self.embed_dims)[1] + return_intermediate=True, + init_cfg=None): + super().__init__(init_cfg) + if isinstance(layers_cfg, dict): + layers_cfg = [copy.deepcopy(layers_cfg) for _ in range(num_layers)] else: - self.post_norm = None - - def forward(self, query, *args, **kwargs): - """Forward function for `TransformerDecoder`. - - Args: - query (Tensor): Input query with shape - `(num_query, bs, embed_dims)`. + assert isinstance(layers_cfg, list) and \ + len(layers_cfg) == num_layers # TODO + self.layers_cfg = layers_cfg # TODO + self.num_layers = num_layers + self.post_norm_cfg = post_norm_cfg + self.return_intermediate = return_intermediate + self._init_layers() + self.embed_dims = self.layers[0].embed_dims # TODO + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] - Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. - """ - if not self.return_intermediate: - x = super().forward(query, *args, **kwargs) - if self.post_norm: - x = self.post_norm(x)[None] - return x + def _init_layers(self): + self.layers = ModuleList() + for i in range(self.num_layers): + self.layers.append( + DetrTransformerDecoderLayer(**self.layers_cfg[i])) + def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): intermediate = [] for layer in self.layers: - query = layer(query, *args, **kwargs) + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) if self.return_intermediate: - if self.post_norm is not None: - intermediate.append(self.post_norm(query)) - else: - intermediate.append(query) - return torch.stack(intermediate) - - -@TRANSFORMER.register_module() -class Transformer(BaseModule): - """Implements the DETR transformer. 
- - Following the official DETR implementation, this module copy-paste - from torch.nn.Transformer with modifications: - - * positional encodings are passed in MultiheadAttention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers - - See `paper: End-to-End Object Detection with Transformers - `_ for details. - - Args: - encoder (`mmcv.ConfigDict` | Dict): Config of - TransformerEncoder. Defaults to None. - decoder ((`mmcv.ConfigDict` | Dict)): Config of - TransformerDecoder. Defaults to None - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Defaults to None. - """ - - def __init__(self, encoder=None, decoder=None, init_cfg=None): - super(Transformer, self).__init__(init_cfg=init_cfg) - self.encoder = build_transformer_layer_sequence(encoder) - self.decoder = build_transformer_layer_sequence(decoder) - self.embed_dims = self.encoder.embed_dims + intermediate.append(query) - def init_weights(self): - # follow the official DETR to init parameters - for m in self.modules(): - if hasattr(m, 'weight') and m.weight.dim() > 1: - xavier_init(m, distribution='uniform') - self._is_init = True + if self.post_norm is not None: + query = self.post_norm(query) + if self.return_intermediate: + intermediate.pop() + intermediate.append(query) - def forward(self, x, mask, query_embed, pos_embed): - """Forward function for `Transformer`. + if self.return_intermediate: + return torch.stack(intermediate) - Args: - x (Tensor): Input query with shape [bs, c, h, w] where - c = embed_dims. - mask (Tensor): The key_padding_mask used for encoder and decoder, - with shape [bs, h, w]. - query_embed (Tensor): The query embedding for decoder, with shape - [num_query, c]. - pos_embed (Tensor): The positional encoding for encoder and - decoder, with the same shape as `x`. + return query - Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - - out_dec: Output from decoder. If return_intermediate_dec \ - is True output has shape [num_dec_layers, bs, - num_query, embed_dims], else has shape [1, bs, \ - num_query, embed_dims]. - - memory: Output results from encoder, with shape \ - [bs, embed_dims, h, w]. - """ - bs, c, h, w = x.shape - # use `view` instead of `flatten` for dynamically exporting to ONNX - x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] - pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat( - 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] - mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] - memory = self.encoder( - query=x, - key=None, - value=None, - query_pos=pos_embed, - query_key_padding_mask=mask) - target = torch.zeros_like(query_embed) - # out_dec: [num_layers, num_query, bs, dim] - out_dec = self.decoder( - query=target, - key=memory, - value=memory, - key_pos=pos_embed, - query_pos=query_embed, - key_padding_mask=mask) - out_dec = out_dec.transpose(1, 2) - memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) - return out_dec, memory - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DeformableDetrTransformerDecoder(TransformerLayerSequence): - """Implements the decoder in DETR transformer. - Args: - return_intermediate (bool): Whether to return intermediate outputs. - coder_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. 
- """ +class DetrTransformerEncoderLayer(BaseModule): - def __init__(self, *args, return_intermediate=False, **kwargs): + def __init__(self, + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=False): - super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) - self.return_intermediate = return_intermediate + super(DetrTransformerEncoderLayer, self).__init__(init_cfg) + if 'batch_first' in self_attn_cfg: # TODO + assert batch_first == self_attn_cfg['batch_first'] + else: + self_attn_cfg['batch_first'] = batch_first + self.batch_first = batch_first # TODO + self.self_attn_cfg = self_attn_cfg # TODO + self.ffn_cfg = ffn_cfg # TODO + self.norm_cfg = norm_cfg # TODO + self._init_layers() + + def _init_layers(self): + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.self_attn.operation_name = 'self_attn' + self.embed_dims = self.self_attn.embed_dims # TODO + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) def forward(self, query, - *args, - reference_points=None, - valid_ratios=None, - reg_branches=None, + query_pos=None, + attn_masks=None, + query_key_padding_mask=None, **kwargs): - """Forward function for `TransformerDecoder`. - - Args: - query (Tensor): Input query with shape - `(num_query, bs, embed_dims)`. - reference_points (Tensor): The reference - points of offset. has shape - (bs, num_query, 4) when as_two_stage, - otherwise has shape ((bs, num_query, 2). - valid_ratios (Tensor): The radios of valid - points on the feature map, has shape - (bs, num_levels, 2) - reg_branch: (obj:`nn.ModuleList`): Used for - refining the regression results. Only would - be passed when with_box_refine is True, - otherwise would be passed a `None`. - Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. 
- """ - output = query - intermediate = [] - intermediate_reference_points = [] - for lid, layer in enumerate(self.layers): - if reference_points.shape[-1] == 4: - reference_points_input = reference_points[:, :, None] * \ - torch.cat([valid_ratios, valid_ratios], -1)[:, None] - else: - assert reference_points.shape[-1] == 2 - reference_points_input = reference_points[:, :, None] * \ - valid_ratios[:, None] - output = layer( - output, - *args, - reference_points=reference_points_input, - **kwargs) - output = output.permute(1, 0, 2) - - if reg_branches is not None: - tmp = reg_branches[lid](output) - if reference_points.shape[-1] == 4: - new_reference_points = tmp + inverse_sigmoid( - reference_points) - new_reference_points = new_reference_points.sigmoid() - else: - assert reference_points.shape[-1] == 2 - new_reference_points = tmp - new_reference_points[..., :2] = tmp[ - ..., :2] + inverse_sigmoid(reference_points) - new_reference_points = new_reference_points.sigmoid() - reference_points = new_reference_points.detach() - - output = output.permute(1, 0, 2) - if self.return_intermediate: - intermediate.append(output) - intermediate_reference_points.append(reference_points) - - if self.return_intermediate: - return torch.stack(intermediate), torch.stack( - intermediate_reference_points) + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=attn_masks, + key_padding_mask=query_key_padding_mask, + **kwargs) + query = self.norms[0](query) + query = self.ffn(query) + query = self.norms[1](query) - return output, reference_points + return query -@TRANSFORMER.register_module() -class DeformableDetrTransformer(Transformer): - """Implements the DeformableDETR transformer. - - Args: - as_two_stage (bool): Generate query from encoder features. - Default: False. - num_feature_levels (int): Number of feature maps from FPN: - Default: 4. - two_stage_num_proposals (int): Number of proposals when set - `as_two_stage` as True. Default: 300. - """ +class DetrTransformerDecoderLayer(BaseModule): def __init__(self, - as_two_stage=False, - num_feature_levels=4, - two_stage_num_proposals=300, - **kwargs): - super(DeformableDetrTransformer, self).__init__(**kwargs) - self.as_two_stage = as_two_stage - self.num_feature_levels = num_feature_levels - self.two_stage_num_proposals = two_stage_num_proposals - self.embed_dims = self.encoder.embed_dims - self.init_layers() - - def init_layers(self): - """Initialize layers of the DeformableDetrTransformer.""" - self.level_embeds = nn.Parameter( - torch.Tensor(self.num_feature_levels, self.embed_dims)) - - if self.as_two_stage: - self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) - self.enc_output_norm = nn.LayerNorm(self.embed_dims) - self.pos_trans = nn.Linear(self.embed_dims * 2, - self.embed_dims * 2) - self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) - else: - self.reference_points = nn.Linear(self.embed_dims, 2) - - def init_weights(self): - """Initialize the transformer weights.""" - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - for m in self.modules(): - if isinstance(m, MultiScaleDeformableAttention): - m.init_weights() - if not self.as_two_stage: - xavier_init(self.reference_points, distribution='uniform', bias=0.) - normal_(self.level_embeds) - - def gen_encoder_output_proposals(self, memory, memory_padding_mask, - spatial_shapes): - """Generate proposals from encoded memory. 
-
-        Args:
-            memory (Tensor) : The output of encoder,
-                has shape (bs, num_key, embed_dim).  num_key is
-                equal the number of points on feature map from
-                all level.
-            memory_padding_mask (Tensor): Padding mask for memory.
-                has shape (bs, num_key).
-            spatial_shapes (Tensor): The shape of all feature maps.
-                has shape (num_level, 2).
-
-        Returns:
-            tuple: A tuple of feature map and bbox prediction.
-
-                - output_memory (Tensor): The input of decoder,  \
-                    has shape (bs, num_key, embed_dim).  num_key is \
-                    equal the number of points on feature map from \
-                    all levels.
-                - output_proposals (Tensor): The normalized proposal \
-                    after a inverse sigmoid, has shape \
-                    (bs, num_keys, 4).
-        """
-
-        N, S, C = memory.shape
-        proposals = []
-        _cur = 0
-        for lvl, (H, W) in enumerate(spatial_shapes):
-            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(
-                N, H, W, 1)
-            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
-            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
-
-            grid_y, grid_x = torch.meshgrid(
-                torch.linspace(
-                    0, H - 1, H, dtype=torch.float32, device=memory.device),
-                torch.linspace(
-                    0, W - 1, W, dtype=torch.float32, device=memory.device))
-            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
-
-            scale = torch.cat([valid_W.unsqueeze(-1),
-                               valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)
-            grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale
-            wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
-            proposal = torch.cat((grid, wh), -1).view(N, -1, 4)
-            proposals.append(proposal)
-            _cur += (H * W)
-        output_proposals = torch.cat(proposals, 1)
-        output_proposals_valid = ((output_proposals > 0.01) &
-                                  (output_proposals < 0.99)).all(
-                                      -1, keepdim=True)
-        output_proposals = torch.log(output_proposals / (1 - output_proposals))
-        output_proposals = output_proposals.masked_fill(
-            memory_padding_mask.unsqueeze(-1), float('inf'))
-        output_proposals = output_proposals.masked_fill(
-            ~output_proposals_valid, float('inf'))
-
-        output_memory = memory
-        output_memory = output_memory.masked_fill(
-            memory_padding_mask.unsqueeze(-1), float(0))
-        output_memory = output_memory.masked_fill(~output_proposals_valid,
-                                                  float(0))
-        output_memory = self.enc_output_norm(self.enc_output(output_memory))
-        return output_memory, output_proposals
-
-    @staticmethod
-    def get_reference_points(spatial_shapes, valid_ratios, device):
-        """Get the reference points used in decoder.
-
-        Args:
-            spatial_shapes (Tensor): The shape of all
-                feature maps, has shape (num_level, 2).
-            valid_ratios (Tensor): The radios of valid
-                points on the feature map, has shape
-                (bs, num_levels, 2)
-            device (obj:`device`): The device where
-                reference_points should be.
+                 self_attn_cfg=dict(
+                     embed_dims=256, num_heads=8, dropout=0.0),
+                 cross_attn_cfg=dict(
+                     embed_dims=256, num_heads=8, dropout=0.0),
+                 ffn_cfg=dict(
+                     embed_dims=256,
+                     feedforward_channels=1024,
+                     num_fcs=2,
+                     ffn_drop=0.,
+                     act_cfg=dict(type='ReLU', inplace=True),
+                 ),
+                 norm_cfg=dict(type='LN'),
+                 init_cfg=None,
+                 batch_first=False):
-
-        Returns:
-            Tensor: reference points used in decoder, has \
-                shape (bs, num_keys, num_levels, 2).
- """ - reference_points_list = [] - for lvl, (H, W) in enumerate(spatial_shapes): - # TODO check this 0.5 - ref_y, ref_x = torch.meshgrid( - torch.linspace( - 0.5, H - 0.5, H, dtype=torch.float32, device=device), - torch.linspace( - 0.5, W - 0.5, W, dtype=torch.float32, device=device)) - ref_y = ref_y.reshape(-1)[None] / ( - valid_ratios[:, None, lvl, 1] * H) - ref_x = ref_x.reshape(-1)[None] / ( - valid_ratios[:, None, lvl, 0] * W) - ref = torch.stack((ref_x, ref_y), -1) - reference_points_list.append(ref) - reference_points = torch.cat(reference_points_list, 1) - reference_points = reference_points[:, :, None] * valid_ratios[:, None] - return reference_points - - def get_valid_ratio(self, mask): - """Get the valid radios of feature maps of all level.""" - _, H, W = mask.shape - valid_H = torch.sum(~mask[:, :, 0], 1) - valid_W = torch.sum(~mask[:, 0, :], 1) - valid_ratio_h = valid_H.float() / H - valid_ratio_w = valid_W.float() / W - valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) - return valid_ratio - - def get_proposal_pos_embed(self, - proposals, - num_pos_feats=128, - temperature=10000): - """Get the position embedding of proposal.""" - scale = 2 * math.pi - dim_t = torch.arange( - num_pos_feats, dtype=torch.float32, device=proposals.device) - dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) - # N, L, 4 - proposals = proposals.sigmoid() * scale - # N, L, 4, 128 - pos = proposals[:, :, :, None] / dim_t - # N, L, 4, 64, 2 - pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), - dim=4).flatten(2) - return pos + super(DetrTransformerDecoderLayer, self).__init__(init_cfg) + for attn_cfg in (self_attn_cfg, cross_attn_cfg): + if 'batch_first' in attn_cfg: + assert batch_first == attn_cfg['batch_first'] + else: + attn_cfg['batch_first'] = batch_first + self.batch_first = batch_first + self.self_attn_cfg = self_attn_cfg + self.cross_attn_cfg = cross_attn_cfg + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self): + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.self_attn.operation_name = 'self_attn' + self.cross_attn = MultiheadAttention(**self.cross_attn_cfg) + self.cross_attn.operation_name = 'cross_attn' + self.embed_dims = self.self_attn.embed_dims # TODO + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) def forward(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - reg_branches=None, - cls_branches=None, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + self_attn_masks=None, + cross_attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, **kwargs): - """Forward function for `Transformer`. - - Args: - mlvl_feats (list(Tensor)): Input queries from - different level. Each element has shape - [bs, embed_dims, h, w]. - mlvl_masks (list(Tensor)): The key_padding_mask from - different level used for encoder and decoder, - each element has shape [bs, h, w]. - query_embed (Tensor): The query embedding for decoder, - with shape [num_query, c]. - mlvl_pos_embeds (list(Tensor)): The positional encoding - of feats from different level, has the shape - [bs, embed_dims, h, w]. - reg_branches (obj:`nn.ModuleList`): Regression heads for - feature maps from each decoder layer. Only would - be passed when - `with_box_refine` is True. Default to None. 
- cls_branches (obj:`nn.ModuleList`): Classification heads - for feature maps from each decoder layer. Only would - be passed when `as_two_stage` - is True. Default to None. - - Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - - inter_states: Outputs from decoder. If - return_intermediate_dec is True output has shape \ - (num_dec_layers, bs, num_query, embed_dims), else has \ - shape (1, bs, num_query, embed_dims). - - init_reference_out: The initial value of reference \ - points, has shape (bs, num_queries, 4). - - inter_references_out: The internal value of reference \ - points in decoder, has shape \ - (num_dec_layers, bs,num_query, embed_dims) - - enc_outputs_class: The classification score of \ - proposals generated from \ - encoder's feature maps, has shape \ - (batch, h*w, num_classes). \ - Only would be returned when `as_two_stage` is True, \ - otherwise None. - - enc_outputs_coord_unact: The regression results \ - generated from encoder's feature maps., has shape \ - (batch, h*w, 4). Only would \ - be returned when `as_two_stage` is True, \ - otherwise None. - """ - assert self.as_two_stage or query_embed is not None - - feat_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - pos_embed = pos_embed.flatten(2).transpose(1, 2) - lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) - lvl_pos_embed_flatten.append(lvl_pos_embed) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - reference_points = \ - self.get_reference_points(spatial_shapes, - valid_ratios, - device=feat.device) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( - 1, 0, 2) # (H*W, bs, embed_dims) - memory = self.encoder( - query=feat_flatten, - key=None, - value=None, - query_pos=lvl_pos_embed_flatten, - query_key_padding_mask=mask_flatten, - spatial_shapes=spatial_shapes, - reference_points=reference_points, - level_start_index=level_start_index, - valid_ratios=valid_ratios, + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_masks, + key_padding_mask=query_key_padding_mask, **kwargs) - - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - if self.as_two_stage: - output_memory, output_proposals = \ - self.gen_encoder_output_proposals( - memory, mask_flatten, spatial_shapes) - enc_outputs_class = cls_branches[self.decoder.num_layers]( - output_memory) - enc_outputs_coord_unact = \ - reg_branches[ - self.decoder.num_layers](output_memory) + output_proposals - - topk = self.two_stage_num_proposals - # We only use the first channel in enc_outputs_class as foreground, - # the other (num_classes - 1) channels are actually not used. 
- # Its targets are set to be 0s, which indicates the first - # class (foreground) because we use [0, num_classes - 1] to - # indicate class labels, background class is indicated by - # num_classes (similar convention in RPN). - # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa - # This follows the official implementation of Deformable DETR. - topk_proposals = torch.topk( - enc_outputs_class[..., 0], topk, dim=1)[1] - topk_coords_unact = torch.gather( - enc_outputs_coord_unact, 1, - topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) - topk_coords_unact = topk_coords_unact.detach() - reference_points = topk_coords_unact.sigmoid() - init_reference_out = reference_points - pos_trans_out = self.pos_trans_norm( - self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) - query_pos, query = torch.split(pos_trans_out, c, dim=2) - else: - query_pos, query = torch.split(query_embed, c, dim=1) - query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) - query = query.unsqueeze(0).expand(bs, -1, -1) - reference_points = self.reference_points(query_pos).sigmoid() - init_reference_out = reference_points - - # decoder - query = query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - query_pos = query_pos.permute(1, 0, 2) - inter_states, inter_references = self.decoder( + query = self.norms[0](query) + query = self.cross_attn( query=query, - key=None, - value=memory, + key=key, + value=value, query_pos=query_pos, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, + key_pos=key_pos, + attn_mask=cross_attn_masks, + key_padding_mask=key_padding_mask, **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) - inter_references_out = inter_references - if self.as_two_stage: - return inter_states, init_reference_out,\ - inter_references_out, enc_outputs_class,\ - enc_outputs_coord_unact - return inter_states, init_reference_out, \ - inter_references_out, None, None + return query @TRANSFORMER.register_module() From 3147437758cdbd6595884105dd35b72b6a1e3cdb Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 17:18:00 +0800 Subject: [PATCH 17/23] feat: add TransformerDetector modules as the base detector module of DETR-like detectors --- .../models/detectors/detection_transformer.py | 230 ++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 mmdet/models/detectors/detection_transformer.py diff --git a/mmdet/models/detectors/detection_transformer.py b/mmdet/models/detectors/detection_transformer.py new file mode 100644 index 00000000000..e114e37e3d8 --- /dev/null +++ b/mmdet/models/detectors/detection_transformer.py @@ -0,0 +1,230 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
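+"""Base detector module for DETR-like detectors.
+
+``TransformerDetector`` owns the positional encoding, the transformer
+encoder/decoder and the query embeddings, so that DETR-like bbox heads
+only need to consume the decoder outputs.
+"""
+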
+import warnings + +import torch +import torch.nn.functional as F +from mmcv.cnn import Conv2d, xavier_init +from torch import nn + +from mmdet.core import bbox2result +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from ..utils.positional_encoding import SinePositionalEncoding +from ..utils.transformer import DetrTransformerDecoder, DetrTransformerEncoder +from .base import BaseDetector + + +@DETECTORS.register_module() +class TransformerDetector(BaseDetector): + + def __init__( + self, + backbone, + encoder_cfg, + decoder_cfg, + bbox_head, + neck=None, + positional_encoding_cfg=dict(num_feats=128, normalize=True), + num_query=100, + train_cfg=None, + test_cfg=None, + # pretrained=None, + init_cfg=None): + super(TransformerDetector, self).__init__(init_cfg) + # if pretrained: # TODO: Should this be deleted? + # warnings.warn('DeprecationWarning: pretrained is deprecated, ' + # 'please use "init_cfg" instead') + # backbone.pretrained = pretrained + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.encoder_cfg = encoder_cfg + self.decoder_cfg = decoder_cfg + self.positional_encoding_cfg = positional_encoding_cfg + self.num_query = num_query + + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + self.bbox_head = build_head(bbox_head) + self._init_layers() + + def _init_layers(self): + self._init_transformer() + self._init_decoder_queries() + self._init_input_proj() + + def _init_transformer(self): + self.positional_encoding = SinePositionalEncoding( + **self.positional_encoding_cfg) + self.encoder = DetrTransformerEncoder(**self.encoder_cfg) + self.decoder = DetrTransformerDecoder(**self.decoder_cfg) + self.embed_dims = self.encoder.embed_dims # TODO + + num_feats = self.positional_encoding.num_feats + assert num_feats * 2 == self.embed_dims, \ + f'embed_dims should be exactly 2 times of num_feats. ' \ + f'Found {self.embed_dims} and {num_feats}.' + + def _init_input_proj(self): + in_channels = self.backbone.feat_dim # TODO + self.input_proj = Conv2d(in_channels, self.embed_dims, kernel_size=1) + + def _init_decoder_queries(self): + self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) + + def init_weights(self): # TODO + super(TransformerDetector, self).init_weights() + self._init_transformer_weights() + self._is_init = True # TODO + + def _init_transformer_weights(self): # TODO + # follow the DetrTransformer to init parameters + for coder in [self.encoder, self.decoder]: + for m in coder.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + + # def _load_from_state_dict # TODO ! 
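+
+    # NOTE a summary of the data flow implemented by the methods below:
+    #   feats = self.extract_feat(img)        # backbone (+ optional neck)
+    #   x, mask, pos_embed = self.forward_pretransformer(feats[0], img_metas)
+    #   outs_dec, _ = self.forward_transformer(x, mask,
+    #                                          self.query_embedding.weight,
+    #                                          pos_embed)
+    #   the bbox_head then consumes `outs_dec` via forward_train/simple_test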
+
+    def extract_feat(self, img):
+        x = self.backbone(img)
+        if self.with_neck:
+            x = self.neck(x)
+        return x
+
+    def forward_train(self,
+                      img,
+                      img_metas,
+                      gt_bboxes,
+                      gt_labels,
+                      gt_bboxes_ignore=None):
+        super(TransformerDetector, self).forward_train(img, img_metas)
+        x = self.extract_feat(img)
+        assert isinstance(x, tuple) and len(x) == 1  # TODO: delete this
+        x, mask, pos_embed = self.forward_pretransformer(x[0], img_metas)
+        outs_dec, _ = self.forward_transformer(x, mask,
+                                               self.query_embedding.weight,
+                                               pos_embed)
+        losses = self.bbox_head.forward_train(outs_dec, img_metas, gt_bboxes,
+                                              gt_labels, gt_bboxes_ignore)
+        return losses
+
+    def simple_test(self, img, img_metas, rescale=False):
+        x = self.extract_feat(img)
+        assert isinstance(x, tuple) and len(x) == 1  # TODO: delete this
+        x, mask, pos_embed = self.forward_pretransformer(x[0], img_metas)
+        outs_dec, _ = self.forward_transformer(x, mask,
+                                               self.query_embedding.weight,
+                                               pos_embed)
+        results_list = self.bbox_head.simple_test(
+            outs_dec, img_metas, rescale=rescale)
+        bbox_results = [
+            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+            for det_bboxes, det_labels in results_list
+        ]
+        return bbox_results
+
+    def aug_test(self, imgs, img_metas, rescale=False):
+        assert hasattr(self.bbox_head, 'aug_test'), \
+            f'{self.bbox_head.__class__.__name__}' \
+            ' does not support test-time augmentation'
+
+        x = self.extract_feat(imgs)
+        assert isinstance(x, tuple) and len(x) == 1  # TODO: delete this
+        x, mask, pos_embed = self.forward_pretransformer(x[0], img_metas)
+        outs_dec, _ = self.forward_transformer(
+            x, mask, self.query_embedding.weight,
+            pos_embed)  # TODO: may have bugs
+        results_list = self.bbox_head.aug_test(
+            outs_dec, img_metas, rescale=rescale)
+        bbox_results = [
+            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+            for det_bboxes, det_labels in results_list
+        ]
+        return bbox_results
+
+    def forward_pretransformer(self, x, img_metas):
+        # construct binary masks, which are used by the transformer.
+        # NOTE following the official DETR repo, non-zero values represent
+        # ignored positions, while zero values mark valid positions.
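+        # An illustrative example (values assumed): with batch_input_shape
+        # (4, 4) and an image of img_shape (2, 3, 3), the mask becomes
+        #   [[0, 0, 0, 1],
+        #    [0, 0, 0, 1],
+        #    [1, 1, 1, 1],
+        #    [1, 1, 1, 1]]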
+        batch_size = x.size(0)
+        input_img_h, input_img_w = img_metas[0]['batch_input_shape']
+        masks = x.new_ones((batch_size, input_img_h, input_img_w))
+        for img_id in range(batch_size):
+            img_h, img_w, _ = img_metas[img_id]['img_shape']
+            masks[img_id, :img_h, :img_w] = 0
+
+        x = self.input_proj(x)
+        # interpolate masks to have the same spatial shape as x
+        masks = F.interpolate(
+            masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
+        # position encoding
+        pos_embed = self.positional_encoding(masks)  # [bs, embed_dim, h, w]
+        return x, masks, pos_embed
+
+    def forward_transformer(self, x, mask, query_embed, pos_embed):
+        bs, c, h, w = x.shape
+        # use `view` instead of `flatten` for dynamically exporting to ONNX
+        x = x.view(bs, c, -1).permute(2, 0, 1)  # [bs, c, h, w] -> [h*w, bs, c]
+        pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)
+        query_embed = query_embed.unsqueeze(1).repeat(
+            1, bs, 1)  # [num_query, dim] -> [num_query, bs, dim]
+        mask = mask.view(bs, -1)  # [bs, h, w] -> [bs, h*w]
+        memory = self.encoder(
+            query=x, query_pos=pos_embed, query_key_padding_mask=mask)
+        target = torch.zeros_like(query_embed)
+        # out_dec: [num_layers, num_query, bs, dim]
+        out_dec = self.decoder(
+            query=target,
+            key=memory,
+            value=memory,
+            key_pos=pos_embed,
+            query_pos=query_embed,
+            key_padding_mask=mask)
+        out_dec = out_dec.transpose(1, 2)
+        memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
+        return out_dec, memory
+
+    # over-write `forward_dummy` because:
+    # the refactored bbox_head consumes decoder outputs instead of
+    # backbone features, so the transformer has to be run here
+    def forward_dummy(self, img):
+        warnings.warn('Warning! MultiheadAttention in DETR does not '
+                      'support flops computation! Do not use the '
+                      'results in your papers!')
+
+        batch_size, _, height, width = img.shape
+        dummy_img_metas = [
+            dict(
+                batch_input_shape=(height, width),
+                img_shape=(height, width, 3)) for _ in range(batch_size)
+        ]
+        x = self.extract_feat(img)
+        x, mask, pos_embed = self.forward_pretransformer(
+            x[0], dummy_img_metas)
+        outs_dec, _ = self.forward_transformer(x, mask,
+                                               self.query_embedding.weight,
+                                               pos_embed)
+        outs = self.bbox_head(outs_dec)
+        return outs
+
+    # over-write `onnx_export` because:
+    # (1) the forward of bbox_head requires img_metas
+    # (2) the behavior (e.g. the construction of `masks`) differs between
+    #     the torch and ONNX models during the forward of bbox_head
+    def onnx_export(self, img, img_metas):
+        """Test function for exporting to ONNX, without test time augmentation.
+
+        Args:
+            img (torch.Tensor): input images.
+            img_metas (list[dict]): List of image information.
+
+        Returns:
+            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
+                and class labels of shape [N, num_det].
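+
+        Note:
+            This path relies on ``self.bbox_head.forward_onnx`` and
+            ``self.bbox_head.onnx_export``, which are still marked as TODO
+            for the refactored ``DETRHead``.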
+ """ + x = self.extract_feat(img) + # forward of this head requires img_metas + outs = self.bbox_head.forward_onnx(x, img_metas) + # get shape as tensor + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + + det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) + + return det_bboxes, det_labels From b02fb6a82b946df216e7c11e7561622b6d364f90 Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 17:20:53 +0800 Subject: [PATCH 18/23] refactor: refactor modules and configs of DETR --- configs/detr/detr_r50_8x2_150e_coco.py | 60 ++-- mmdet/models/dense_heads/detr_head.py | 366 +++---------------------- mmdet/models/detectors/detr.py | 66 +---- 3 files changed, 66 insertions(+), 426 deletions(-) diff --git a/configs/detr/detr_r50_8x2_150e_coco.py b/configs/detr/detr_r50_8x2_150e_coco.py index 892447dec15..4ca850523e0 100644 --- a/configs/detr/detr_r50_8x2_150e_coco.py +++ b/configs/detr/detr_r50_8x2_150e_coco.py @@ -3,6 +3,7 @@ ] model = dict( type='DETR', + num_query=100, backbone=dict( type='ResNet', depth=50, @@ -13,45 +14,34 @@ norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + encoder_cfg=dict( + num_layers=6, + layers_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.1), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)))), + decoder_cfg=dict( + num_layers=6, + layers_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.1), + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.1), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True))), + return_intermediate=True), + positional_encoding_cfg=dict(num_feats=128, normalize=True), bbox_head=dict( type='DETRHead', num_classes=80, + embed_dims=256, in_channels=2048, - transformer=dict( - type='Transformer', - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1) - ], - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='DetrTransformerDecoder', - return_intermediate=True, - num_layers=6, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1), - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')), - )), - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, diff --git a/mmdet/models/dense_heads/detr_head.py b/mmdet/models/dense_heads/detr_head.py index 6b7ee13cb68..0dc0932eb9f 100644 --- a/mmdet/models/dense_heads/detr_head.py +++ b/mmdet/models/dense_heads/detr_head.py @@ -2,64 +2,27 @@ import torch import torch.nn as nn import torch.nn.functional as F -from mmcv.cnn import Conv2d, Linear, build_activation_layer -from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding +from mmcv.cnn import Linear +from mmcv.cnn.bricks.transformer import FFN from mmcv.runner import force_fp32 from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, build_assigner, build_sampler, 
multi_apply, reduce_mean) -from mmdet.models.utils import build_transformer from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class DETRHead(AnchorFreeHead): - """Implements the DETR transformer head. - - See `paper: End-to-End Object Detection with Transformers - `_ for details. - - Args: - num_classes (int): Number of categories excluding the background. - in_channels (int): Number of channels in the input feature map. - num_query (int): Number of query in Transformer. - num_reg_fcs (int, optional): Number of fully-connected layers used in - `FFN`, which is then used for the regression head. Default 2. - transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. - Default: None. - sync_cls_avg_factor (bool): Whether to sync the avg_factor of - all ranks. Default to False. - positional_encoding (obj:`mmcv.ConfigDict`|dict): - Config for position encoding. - loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the - classification loss. Default `CrossEntropyLoss`. - loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the - regression loss. Default `L1Loss`. - loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the - regression iou loss. Default `GIoULoss`. - tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of - transformer head. - test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of - transformer head. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ _version = 2 def __init__(self, num_classes, - in_channels, - num_query=100, + embed_dims, num_reg_fcs=2, - transformer=None, sync_cls_avg_factor=False, - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True), loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, @@ -104,16 +67,15 @@ def __init__(self, self.bg_cls_weight = bg_cls_weight if train_cfg: - assert 'assigner' in train_cfg, 'assigner should be provided '\ + assert 'assigner' in train_cfg, 'assigner should be provided ' \ 'when train_cfg is set.' assigner = train_cfg['assigner'] self.assigner = build_assigner(assigner) # DETR sampling=False, so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) - self.num_query = num_query self.num_classes = num_classes - self.in_channels = in_channels + self.embed_dims = embed_dims self.num_reg_fcs = num_reg_fcs self.train_cfg = train_cfg self.test_cfg = test_cfg @@ -126,130 +88,29 @@ def __init__(self, self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 - self.act_cfg = transformer.get('act_cfg', - dict(type='ReLU', inplace=True)) - self.activate = build_activation_layer(self.act_cfg) - self.positional_encoding = build_positional_encoding( - positional_encoding) - self.transformer = build_transformer(transformer) - self.embed_dims = self.transformer.embed_dims - assert 'num_feats' in positional_encoding - num_feats = positional_encoding['num_feats'] - assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ - f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ - f' and {num_feats}.' 
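+        # NOTE the positional encoding and the transformer are now built
+        # and run by the detector (see TransformerDetector); the head only
+        # keeps its classification and regression branches.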
+        self._init_layers()
 
     def _init_layers(self):
         """Initialize layers of the transformer head."""
-        self.input_proj = Conv2d(
-            self.in_channels, self.embed_dims, kernel_size=1)
+        # cls branch
        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+        # reg branch
+        self.activate = nn.ReLU()
         self.reg_ffn = FFN(
             self.embed_dims,
             self.embed_dims,
             self.num_reg_fcs,
-            self.act_cfg,
+            dict(type='ReLU', inplace=True),
             dropout=0.0,
             add_residual=False)
+        # NOTE the activations of reg_branch are the same as those in
+        # transformer, but they are actually different in Conditional DETR
+        # and DAB DETR (prelu in transformer and relu in reg_branch)
         self.fc_reg = Linear(self.embed_dims, 4)
-        self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)
-
-    def init_weights(self):
-        """Initialize weights of the transformer head."""
-        # The initialization for transformer is important
-        self.transformer.init_weights()
-
-    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
-                              missing_keys, unexpected_keys, error_msgs):
-        """load checkpoints."""
-        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
-        # since `AnchorFreeHead._load_from_state_dict` should not be
-        # called here. Invoking the default `Module._load_from_state_dict`
-        # is enough.
-
-        # Names of some parameters in has been changed.
-        version = local_metadata.get('version', None)
-        if (version is None or version < 2) and self.__class__ is DETRHead:
-            convert_dict = {
-                '.self_attn.': '.attentions.0.',
-                '.ffn.': '.ffns.0.',
-                '.multihead_attn.': '.attentions.1.',
-                '.decoder.norm.': '.decoder.post_norm.'
-            }
-            state_dict_keys = list(state_dict.keys())
-            for k in state_dict_keys:
-                for ori_key, convert_key in convert_dict.items():
-                    if ori_key in k:
-                        convert_key = k.replace(ori_key, convert_key)
-                        state_dict[convert_key] = state_dict[k]
-                        del state_dict[k]
-
-        super(AnchorFreeHead,
-              self)._load_from_state_dict(state_dict, prefix, local_metadata,
-                                          strict, missing_keys,
-                                          unexpected_keys, error_msgs)
-
-    def forward(self, feats, img_metas):
-        """Forward function.
-
-        Args:
-            feats (tuple[Tensor]): Features from the upstream network, each is
-                a 4D-tensor.
-            img_metas (list[dict]): List of image information.
-
-        Returns:
-            tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.
-
-            - all_cls_scores_list (list[Tensor]): Classification scores \
-                for each scale level. Each is a 4D-tensor with shape \
-                [nb_dec, bs, num_query, cls_out_channels]. Note \
-                `cls_out_channels` should includes background.
-            - all_bbox_preds_list (list[Tensor]): Sigmoid regression \
-                outputs for each scale level. Each is a 4D-tensor with \
-                normalized coordinate format (cx, cy, w, h) and shape \
-                [nb_dec, bs, num_query, 4].
-        """
-        num_levels = len(feats)
-        img_metas_list = [img_metas for _ in range(num_levels)]
-        return multi_apply(self.forward_single, feats, img_metas_list)
-
-    def forward_single(self, x, img_metas):
-        """"Forward function for a single feature level.
-
-        Args:
-            x (Tensor): Input feature from backbone's single stage, shape
-                [bs, c, h, w].
-            img_metas (list[dict]): List of image information.
-
-        Returns:
-            all_cls_scores (Tensor): Outputs from the classification head,
-                shape [nb_dec, bs, num_query, cls_out_channels]. Note
-                cls_out_channels should includes background.
-            all_bbox_preds (Tensor): Sigmoid outputs from the regression
-                head with normalized coordinate format (cx, cy, w, h).
-                Shape [nb_dec, bs, num_query, 4].
-        """
-        # construct binary masks which used for the transformer.
- # NOTE following the official DETR repo, non-zero values representing - # ignored positions, while zero values means valid positions. - batch_size = x.size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - masks = x.new_ones((batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - masks[img_id, :img_h, :img_w] = 0 - - x = self.input_proj(x) - # interpolate masks to have the same spatial shape with x - masks = F.interpolate( - masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) - # position encoding - pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] + def forward(self, outs_dec): # outs_dec: [nb_dec, bs, num_query, embed_dim] - outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, - pos_embed) - all_cls_scores = self.fc_cls(outs_dec) all_bbox_preds = self.fc_reg(self.activate( self.reg_ffn(outs_dec))).sigmoid() @@ -257,8 +118,8 @@ def forward_single(self, x, img_metas): @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def loss(self, - all_cls_scores_list, - all_bbox_preds_list, + all_cls_scores, + all_bbox_preds, gt_bboxes_list, gt_labels_list, img_metas, @@ -269,11 +130,11 @@ def loss(self, losses by default. Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape + all_cls_scores (Tensor): Classification output + for each feature level, which is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with + all_bbox_preds (Tensor): Sigmoid regression + outputs for each feature level, which is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image @@ -287,9 +148,6 @@ def loss(self, Returns: dict[str, Tensor]: A dictionary of loss components. """ - # NOTE defaultly only the outputs from the last feature scale is used. - all_cls_scores = all_cls_scores_list[-1] - all_bbox_preds = all_bbox_preds_list[-1] assert gt_bboxes_ignore is None, \ 'Only supports for gt_bboxes_ignore setting to None.' @@ -562,7 +420,7 @@ def forward_train(self, dict[str, Tensor]: A dictionary of loss components. """ assert proposal_cfg is None, '"proposal_cfg" must be None' - outs = self(x, img_metas) + outs = self(x) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: @@ -572,18 +430,18 @@ def forward_train(self, @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def get_bboxes(self, - all_cls_scores_list, - all_bbox_preds_list, + all_cls_scores, + all_bbox_preds, img_metas, rescale=False): """Transform network outputs for a batch into bbox predictions. Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape + all_cls_scores (Tensor): Classification output + for each feature level, which is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with + all_bbox_preds (Tensor): Sigmoid regression + output for each feature level, which is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. img_metas (list[dict]): Meta information of each image. 
@@ -600,8 +458,8 @@ def get_bboxes(self, """ # NOTE defaultly only using outputs from the last feature level, # and only the outputs from the last decoder layer is used. - cls_scores = all_cls_scores_list[-1][-1] - bbox_preds = all_bbox_preds_list[-1][-1] + cls_scores = all_cls_scores[-1] + bbox_preds = all_bbox_preds[-1] result_list = [] for img_id in range(len(img_metas)): @@ -647,8 +505,8 @@ def _get_bboxes_single(self, - det_labels: Predicted labels of the corresponding box with \ shape [num_query]. """ - assert len(cls_score) == len(bbox_pred) - max_per_img = self.test_cfg.get('max_per_img', self.num_query) + assert len(cls_score) == len(bbox_pred) # num_query + max_per_img = self.test_cfg.get('max_per_img', len(cls_score)) # exclude background if self.loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() @@ -673,163 +531,13 @@ def _get_bboxes_single(self, return det_bboxes, det_labels - def simple_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n,) - """ + def simple_test_bboxes(self, outs_dec, img_metas, rescale=False): # forward of this head requires img_metas - outs = self.forward(feats, img_metas) + outs = self.forward(outs_dec) results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) return results_list - def forward_onnx(self, feats, img_metas): - """Forward function for exporting to ONNX. - - Over-write `forward` because: `masks` is directly created with - zero (valid position tag) and has the same spatial size as `x`. - Thus the construction of `masks` is different from that in `forward`. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - - - all_cls_scores_list (list[Tensor]): Classification scores \ - for each scale level. Each is a 4D-tensor with shape \ - [nb_dec, bs, num_query, cls_out_channels]. Note \ - `cls_out_channels` should includes background. - - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ - outputs for each scale level. Each is a 4D-tensor with \ - normalized coordinate format (cx, cy, w, h) and shape \ - [nb_dec, bs, num_query, 4]. - """ - num_levels = len(feats) - img_metas_list = [img_metas for _ in range(num_levels)] - return multi_apply(self.forward_single_onnx, feats, img_metas_list) - - def forward_single_onnx(self, x, img_metas): - """"Forward function for a single feature level with ONNX exportation. - - Args: - x (Tensor): Input feature from backbone's single stage, shape - [bs, c, h, w]. - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, - shape [nb_dec, bs, num_query, cls_out_channels]. Note - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression - head with normalized coordinate format (cx, cy, w, h). - Shape [nb_dec, bs, num_query, 4]. 
- """ - # Note `img_shape` is not dynamically traceable to ONNX, - # since the related augmentation was done with numpy under - # CPU. Thus `masks` is directly created with zeros (valid tag) - # and the same spatial shape as `x`. - # The difference between torch and exported ONNX model may be - # ignored, since the same performance is achieved (e.g. - # 40.1 vs 40.1 for DETR) - batch_size = x.size(0) - h, w = x.size()[-2:] - masks = x.new_zeros((batch_size, h, w)) # [B,h,w] - - x = self.input_proj(x) - # interpolate masks to have the same spatial shape with x - masks = F.interpolate( - masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) - pos_embed = self.positional_encoding(masks) - outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, - pos_embed) - - all_cls_scores = self.fc_cls(outs_dec) - all_bbox_preds = self.fc_reg(self.activate( - self.reg_ffn(outs_dec))).sigmoid() - return all_cls_scores, all_bbox_preds - - def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): - """Transform network outputs into bbox predictions, with ONNX - exportation. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - img_metas (list[dict]): Meta information of each image. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - assert len(img_metas) == 1, \ - 'Only support one input image while in exporting to ONNX' - - cls_scores = all_cls_scores_list[-1][-1] - bbox_preds = all_bbox_preds_list[-1][-1] - - # Note `img_shape` is not dynamically traceable to ONNX, - # here `img_shape_for_onnx` (padded shape of image tensor) - # is used. 
- img_shape = img_metas[0]['img_shape_for_onnx'] - max_per_img = self.test_cfg.get('max_per_img', self.num_query) - batch_size = cls_scores.size(0) - # `batch_index_offset` is used for the gather of concatenated tensor - batch_index_offset = torch.arange(batch_size).to( - cls_scores.device) * max_per_img - batch_index_offset = batch_index_offset.unsqueeze(1).expand( - batch_size, max_per_img) - - # supports dynamical batch inference - if self.loss_cls.use_sigmoid: - cls_scores = cls_scores.sigmoid() - scores, indexes = cls_scores.view(batch_size, -1).topk( - max_per_img, dim=1) - det_labels = indexes % self.num_classes - bbox_index = indexes // self.num_classes - bbox_index = (bbox_index + batch_index_offset).view(-1) - bbox_preds = bbox_preds.view(-1, 4)[bbox_index] - bbox_preds = bbox_preds.view(batch_size, -1, 4) - else: - scores, det_labels = F.softmax( - cls_scores, dim=-1)[..., :-1].max(-1) - scores, bbox_index = scores.topk(max_per_img, dim=1) - bbox_index = (bbox_index + batch_index_offset).view(-1) - bbox_preds = bbox_preds.view(-1, 4)[bbox_index] - det_labels = det_labels.view(-1)[bbox_index] - bbox_preds = bbox_preds.view(batch_size, -1, 4) - det_labels = det_labels.view(batch_size, -1) - - det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds) - # use `img_shape_tensor` for dynamically exporting to ONNX - img_shape_tensor = img_shape.flip(0).repeat(2) # [w,h,w,h] - img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand( - batch_size, det_bboxes.size(1), 4) - det_bboxes = det_bboxes * img_shape_tensor - # dynamically clip bboxes - x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1) - from mmdet.core.export import dynamic_clip_for_onnx - x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape) - det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1) - det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1) - - return det_bboxes, det_labels + # def forward_onnx(self, feats, img_metas): # TODO + # def forward_single_onnx(self, x, img_metas): # TODO + # def onnx_export( + # self, all_cls_scores_list, all_bbox_preds_list, img_metas): # TODO diff --git a/mmdet/models/detectors/detr.py b/mmdet/models/detectors/detr.py index 06d76913be6..3f483e96df0 100644 --- a/mmdet/models/detectors/detr.py +++ b/mmdet/models/detectors/detr.py @@ -1,70 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - from ..builder import DETECTORS -from .single_stage import SingleStageDetector +from .detection_transformer import TransformerDetector @DETECTORS.register_module() -class DETR(SingleStageDetector): +class DETR(TransformerDetector): r"""Implementation of `DETR: End-to-End Object Detection with Transformers `_""" - def __init__(self, - backbone, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - # over-write `forward_dummy` because: - # the forward of bbox_head requires img_metas - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - warnings.warn('Warning! MultiheadAttention in DETR does not ' - 'support flops computation! 
Do not use the ' - 'results in your papers!') - - batch_size, _, height, width = img.shape - dummy_img_metas = [ - dict( - batch_input_shape=(height, width), - img_shape=(height, width, 3)) for _ in range(batch_size) - ] - x = self.extract_feat(img) - outs = self.bbox_head(x, dummy_img_metas) - return outs - - # over-write `onnx_export` because: - # (1) the forward of bbox_head requires img_metas - # (2) the different behavior (e.g. construction of `masks`) between - # torch and ONNX model, during the forward of bbox_head - def onnx_export(self, img, img_metas): - """Test function for exporting to ONNX, without test time augmentation. - - Args: - img (torch.Tensor): input images. - img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - x = self.extract_feat(img) - # forward of this head requires img_metas - outs = self.bbox_head.forward_onnx(x, img_metas) - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - - det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) - - return det_bboxes, det_labels + def __init__(self, *args, **kwargs): + super(DETR, self).__init__(*args, **kwargs) From 4bb2f9fe1197255a4d686ec1d10f724f669ca687 Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 17:22:32 +0800 Subject: [PATCH 19/23] fix: modified the breaking import of MSDeformAttn --- mmdet/models/plugins/msdeformattn_pixel_decoder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdet/models/plugins/msdeformattn_pixel_decoder.py b/mmdet/models/plugins/msdeformattn_pixel_decoder.py index d553582baef..7060267c89e 100644 --- a/mmdet/models/plugins/msdeformattn_pixel_decoder.py +++ b/mmdet/models/plugins/msdeformattn_pixel_decoder.py @@ -4,12 +4,12 @@ import torch.nn.functional as F from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, normal_init, xavier_init) -from mmcv.cnn.bricks.transformer import (build_positional_encoding, +from mmcv.cnn.bricks.transformer import (MultiScaleDeformableAttention, + build_positional_encoding, build_transformer_layer_sequence) from mmcv.runner import BaseModule, ModuleList from mmdet.core.anchor import MlvlPointGenerator -from mmdet.models.utils.transformer import MultiScaleDeformableAttention @PLUGIN_LAYERS.register_module() From c892bac44bc06dfd97ee28943b56a361c8513f59 Mon Sep 17 00:00:00 2001 From: QingyunLi <962537281@qq.com> Date: Thu, 8 Sep 2022 17:23:45 +0800 Subject: [PATCH 20/23] fix: modified transformer.py --- mmdet/models/utils/__init__.py | 9 ++---- mmdet/models/utils/transformer.py | 47 ++++--------------------------- 2 files changed, 8 insertions(+), 48 deletions(-) diff --git a/mmdet/models/utils/__init__.py b/mmdet/models/utils/__init__.py index e74ba89e8c2..90a3262e2fb 100644 --- a/mmdet/models/utils/__init__.py +++ b/mmdet/models/utils/__init__.py @@ -16,14 +16,11 @@ SinePositionalEncoding) from .res_layer import ResLayer, SimplifiedBasicBlock from .se_layer import DyReLU, SELayer -from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, - DynamicConv, PatchEmbed, Transformer, nchw_to_nlc, - nlc_to_nchw) +from .transformer import DynamicConv, PatchEmbed, nchw_to_nlc, nlc_to_nchw __all__ = [ - 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', - 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', - 'build_transformer', 'build_linear_layer', 
'SinePositionalEncoding', + 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'build_transformer', + 'build_linear_layer', 'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual', 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer', diff --git a/mmdet/models/utils/transformer.py b/mmdet/models/utils/transformer.py index 41579f26cff..42f66ac5f6b 100644 --- a/mmdet/models/utils/transformer.py +++ b/mmdet/models/utils/transformer.py @@ -418,27 +418,9 @@ def _init_layers(self): self.layers.append( DetrTransformerEncoderLayer(**self.layers_cfg[i])) - def forward(self, - query, - key, - value, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): + def forward(self, query, *args, **kwargs): for layer in self.layers: - query = layer( - query, - key, - value, - query_pos=query_pos, - key_pos=key_pos, - attn_masks=attn_masks, - query_key_padding_mask=query_key_padding_mask, - key_padding_mask=key_padding_mask, - **kwargs) + query = layer(query, *args, **kwargs) if self.post_norm is not None: query = self.post_norm(query) return query @@ -473,28 +455,10 @@ def _init_layers(self): self.layers.append( DetrTransformerDecoderLayer(**self.layers_cfg[i])) - def forward(self, - query, - key, - value, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): + def forward(self, query, *args, **kwargs): intermediate = [] for layer in self.layers: - query = layer( - query, - key, - value, - query_pos=query_pos, - key_pos=key_pos, - attn_masks=attn_masks, - query_key_padding_mask=query_key_padding_mask, - key_padding_mask=key_padding_mask, - **kwargs) + query = layer(query, *args, **kwargs) if self.return_intermediate: intermediate.append(query) @@ -519,8 +483,7 @@ def __init__(self, feedforward_channels=1024, num_fcs=2, ffn_drop=0., - act_cfg=dict(type='ReLU', inplace=True), - ), + act_cfg=dict(type='ReLU', inplace=True)), norm_cfg=dict(type='LN'), init_cfg=None, batch_first=False): From 773fa93b2e3f0ea3b859a835882a833e652db8e4 Mon Sep 17 00:00:00 2001 From: LYM Date: Thu, 8 Sep 2022 20:21:12 +0800 Subject: [PATCH 21/23] encoder post_norm --- mmdet/models/utils/transformer.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/mmdet/models/utils/transformer.py b/mmdet/models/utils/transformer.py index 42f66ac5f6b..466e9e7e443 100644 --- a/mmdet/models/utils/transformer.py +++ b/mmdet/models/utils/transformer.py @@ -395,7 +395,7 @@ class DetrTransformerEncoder(BaseModule): def __init__(self, layers_cfg=None, num_layers=None, - post_norm_cfg=dict(type='LN'), + post_norm_cfg=None, init_cfg=None): super().__init__(init_cfg) @@ -409,8 +409,6 @@ def __init__(self, self.num_layers = num_layers self._init_layers() self.embed_dims = self.layers[0].embed_dims # TODO - self.post_norm = build_norm_layer(self.post_norm_cfg, - self.embed_dims)[1] def _init_layers(self): self.layers = ModuleList() @@ -421,8 +419,6 @@ def _init_layers(self): def forward(self, query, *args, **kwargs): for layer in self.layers: query = layer(query, *args, **kwargs) - if self.post_norm is not None: - query = self.post_norm(query) return query From a2e625a6c64c0d6fa1ed360276b7f9d021b8bb20 Mon Sep 17 00:00:00 2001 From: LYM Date: Thu, 8 Sep 2022 22:46:27 +0800 Subject: [PATCH 22/23] encoder post_norm --- mmdet/models/utils/transformer.py | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/mmdet/models/utils/transformer.py b/mmdet/models/utils/transformer.py index 466e9e7e443..eb4a3c96597 100644 --- a/mmdet/models/utils/transformer.py +++ b/mmdet/models/utils/transformer.py @@ -395,7 +395,6 @@ class DetrTransformerEncoder(BaseModule): def __init__(self, layers_cfg=None, num_layers=None, - post_norm_cfg=None, init_cfg=None): super().__init__(init_cfg) @@ -405,7 +404,6 @@ def __init__(self, assert isinstance(layers_cfg, list) and \ len(layers_cfg) == num_layers # TODO self.layers_cfg = layers_cfg # TODO - self.post_norm_cfg = post_norm_cfg self.num_layers = num_layers self._init_layers() self.embed_dims = self.layers[0].embed_dims # TODO From 80fe9dca4741384b01793a7182eacb48eb66b093 Mon Sep 17 00:00:00 2001 From: LYM Date: Fri, 9 Sep 2022 11:16:52 +0800 Subject: [PATCH 23/23] encoder post_norm --- mmdet/models/utils/transformer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mmdet/models/utils/transformer.py b/mmdet/models/utils/transformer.py index eb4a3c96597..f3afff94d25 100644 --- a/mmdet/models/utils/transformer.py +++ b/mmdet/models/utils/transformer.py @@ -454,8 +454,7 @@ def forward(self, query, *args, **kwargs): for layer in self.layers: query = layer(query, *args, **kwargs) if self.return_intermediate: - intermediate.append(query) - + intermediate.append(self.post_norm(query)) if self.post_norm is not None: query = self.post_norm(query) if self.return_intermediate:
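
For reference, a minimal sketch that exercises the refactored encoder and decoder layers in isolation, assuming the patched mmdet.models.utils.transformer is importable and the default layer configs from this series are used (shapes follow the batch_first=False convention, [seq_len, bs, embed_dims]):

    import torch

    from mmdet.models.utils.transformer import (DetrTransformerDecoderLayer,
                                                DetrTransformerEncoderLayer)

    # default 256-dim layers; batch_first=False, so tensors are [seq, bs, dim]
    enc_layer = DetrTransformerEncoderLayer()
    dec_layer = DetrTransformerDecoderLayer()

    hw, num_query, bs, dim = 1350, 100, 2, 256
    feat = torch.randn(hw, bs, dim)       # flattened image features [h*w, bs, c]
    pos_embed = torch.randn(hw, bs, dim)  # positional encoding, same shape

    memory = enc_layer(feat, query_pos=pos_embed)

    target = torch.zeros(num_query, bs, dim)       # decoder starts from zeros
    query_embed = torch.randn(num_query, bs, dim)  # learned query embeddings
    out = dec_layer(
        target,
        key=memory,
        value=memory,
        query_pos=query_embed,
        key_pos=pos_embed)
    assert out.shape == (num_query, bs, dim)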