Commit

Merge c62e537 into 0f9dfa9
Xiangxu-0103 authored Jul 18, 2023
2 parents 0f9dfa9 + c62e537 commit 927e772
Showing 6 changed files with 42 additions and 30 deletions.
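
Every file in this commit applies the same migration: the legacy `if '_base_':` guard around base-config imports is replaced with mmengine's `read_base()` context manager, and in-place `.merge()` calls on inherited config variables become `.update()` calls. As a reading aid, here is a minimal sketch of what one of these configs looks like after the change, assembled only from fragments visible in the first (CenterPoint pillar) diff below; treat it as illustrative rather than a complete config.

# Copyright (c) OpenMMLab. All rights reserved.
# Minimal sketch of a config in the new style; only values visible in the
# first diff below are used here.
from mmengine.config import read_base

with read_base():
    # Importing under read_base() makes every variable defined in the base
    # configs (model, data_prefix, dataloaders, train_cfg, ...) available
    # in this module's namespace.
    from .._base_.datasets.nus_3d import *
    from .._base_.models.centerpoint_pillar02_second_secfpn_nus import *
    from .._base_.schedules.cyclic_20e import *

# Inherited variables are dict-like config objects; overrides are now
# applied with update() instead of the previous merge() helper.
data_prefix.update(
    dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'))
train_cfg.update(dict(val_interval=20))
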
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine.config import read_base
+
+with read_base():
    from .._base_.datasets.nus_3d import *
    from .._base_.models.centerpoint_pillar02_second_secfpn_nus import *
    from .._base_.schedules.cyclic_20e import *
@@ -29,9 +31,9 @@
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
-data_prefix.merge(
+data_prefix.update(
dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'))
-model.merge(
+model.update(
dict(
data_preprocessor=dict(
voxel_layer=dict(point_cloud_range=point_cloud_range)),
@@ -167,13 +169,13 @@
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR',
backend_args=backend_args))))
-test_dataloader.merge(
+test_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
-val_dataloader.merge(
+val_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))

-train_cfg.merge(dict(val_interval=20))
+train_cfg.update(dict(val_interval=20))
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine import read_base
+
+with read_base():
    from .._base_.datasets.nus_3d import *
    from .._base_.models.centerpoint_voxel01_second_secfpn_nus import *
    from .._base_.schedules.cyclic_20e import *
@@ -29,9 +31,9 @@
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
-data_prefix.merge(
+data_prefix.update(
dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'))
-model.merge(
+model.update(
dict(
data_preprocessor=dict(
voxel_layer=dict(point_cloud_range=point_cloud_range)),
@@ -167,13 +169,13 @@
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR',
backend_args=backend_args))))
-test_dataloader.merge(
+test_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
-val_dataloader.merge(
+val_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))

-train_cfg.merge(dict(val_interval=20))
+train_cfg.update(dict(val_interval=20))
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine import read_base
+
+with read_base():
    from .._base_.datasets.semantickitti import *
    from .._base_.models.minkunet import *
    from .._base_.schedules.schedule_3x import *
@@ -15,7 +17,7 @@
from mmdet3d.datasets.transforms.transforms_3d import (GlobalRotScaleTrans,
LaserMix, PolarMix)

-model.merge(
+model.update(
dict(
data_preprocessor=dict(max_voxels=None),
backbone=dict(encoder_blocks=[2, 3, 4, 6])))
@@ -92,6 +94,6 @@
dict(type=Pack3DDetInputs, keys=['points', 'pts_semantic_mask'])
]

-train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline)))
+train_dataloader.update(dict(dataset=dict(pipeline=train_pipeline)))

-default_hooks.merge(dict(checkpoint=dict(type=CheckpointHook, interval=1)))
+default_hooks.update(dict(checkpoint=dict(type=CheckpointHook, interval=1)))
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine import read_base
+
+with read_base():
    from .._base_.schedules.cosine import *
    from .._base_.default_runtime import *

@@ -293,7 +295,7 @@
box_type_3d='LiDAR',
backend_args=backend_args))

-optim_wrapper.merge(
+optim_wrapper.update(
dict(
optimizer=dict(weight_decay=0.01),
clip_grad=dict(max_norm=35, norm_type=2),
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine import read_base
+
+with read_base():
    from .._base_.datasets.kitti_mono3d import *
    from .._base_.models.pgd import *
    from .._base_.schedules.mmdet_schedule_1x import *
@@ -19,7 +21,7 @@
from mmdet3d.models.task_modules.coders.pgd_bbox_coder import PGDBBoxCoder

# model settings
-model.merge(
+model.update(
dict(
data_preprocessor=dict(
type=Det3DDataPreprocessor,
@@ -121,13 +123,13 @@
dict(type=Pack3DDetInputs, keys=['img'])
]

-train_dataloader.merge(
+train_dataloader.update(
dict(batch_size=3, num_workers=3, dataset=dict(pipeline=train_pipeline)))
-test_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))
-val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))
+test_dataloader.update(dict(dataset=dict(pipeline=test_pipeline)))
+val_dataloader.update(dict(dataset=dict(pipeline=test_pipeline)))

# optimizer
-optim_wrapper.merge(
+optim_wrapper.update(
dict(
optimizer=dict(lr=0.001),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
@@ -146,5 +148,5 @@
gamma=0.1)
]

-train_cfg.merge(dict(max_epochs=48, val_interval=2))
-auto_scale_lr.merge(dict(base_batch_size=12))
+train_cfg.update(dict(max_epochs=48, val_interval=2))
+auto_scale_lr.update(dict(base_batch_size=12))
10 changes: 6 additions & 4 deletions mmdet3d/configs/votenet/votenet_8xb8_scannet_3d.py
@@ -1,5 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
-if '_base_':
+from mmengine import read_base
+
+with read_base():
    from .._base_.datasets.scannet_3d import *
    from .._base_.models.votenet import *
    from .._base_.schedules.schedule_3x import *
@@ -11,7 +13,7 @@
PartialBinBasedBBoxCoder

# model settings
-model.merge(
+model.update(
dict(
bbox_head=dict(
num_classes=18,
@@ -39,9 +41,9 @@
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]))))

-default_hooks.merge(dict(logger=dict(type=LoggerHook, interval=30)))
+default_hooks.update(dict(logger=dict(type=LoggerHook, interval=30)))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
-auto_scale_lr.merge(dict(enable=False, base_batch_size=64))
+auto_scale_lr.update(dict(enable=False, base_batch_size=64))
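
Configs written in this pure-Python style are still consumed through mmengine's `Config` class. The snippet below is a usage sketch, not part of this commit: the file path is the votenet config named above, and whether a given mmengine version auto-detects the lazy-import style or needs extra arguments may vary.

# Usage sketch (not part of this commit): loading one of the updated
# pure-Python configs with mmengine.
from mmengine.config import Config

cfg = Config.fromfile('mmdet3d/configs/votenet/votenet_8xb8_scannet_3d.py')
print(cfg.train_cfg)      # inherited from _base_.schedules.schedule_3x
print(cfg.auto_scale_lr)  # enable=False, base_batch_size=64 per the diff above
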
