Regression benchmark (#219)
* fix a bug for running vid & sot

* change eval_hook in sot config

* use evalhook of mmcv

* fix a bug where detector is not initialized when training
GT9505 authored Jul 26, 2021
1 parent 2295bbb commit b28898b
Showing 4 changed files with 50 additions and 18 deletions.
7 changes: 6 additions & 1 deletion configs/sot/siamese_rpn/siamese_rpn_r50_1x_lasot.py
@@ -178,7 +178,12 @@
     ])
 # checkpoint saving
 checkpoint_config = dict(interval=1)
-evaluation = dict(metric=['track'], interval=20)
+evaluation = dict(
+    metric=['track'],
+    interval=1,
+    start=10,
+    rule='greater',
+    save_best='success')
 # yapf:disable
 log_config = dict(
     interval=50,
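Note on the config change above: the expanded `evaluation` dict is consumed by the mmcv-based `EvalHook` that `mmtrack/apis/train.py` registers (see the next files). Evaluation now runs every epoch (`interval=1`) starting from epoch 10 (`start=10`), and with `save_best='success'` and `rule='greater'` the hook keeps the checkpoint with the highest `success` score (the SOT success metric on LaSOT). A rough sketch of what these keys imply — hypothetical helpers, not mmcv's actual implementation:

# Hypothetical illustration of interval/start/rule; mmcv.runner.EvalHook
# implements the real logic.
def should_evaluate(epoch, interval=1, start=10):
    # No evaluation before `start`; afterwards, evaluate every `interval` epochs.
    if epoch + 1 < start:
        return False
    return (epoch + 1) % interval == 0


def is_better(new_score, best_score, rule='greater'):
    # rule='greater' means a larger 'success' value wins and is saved as the best ckpt.
    return new_score > best_score if rule == 'greater' else new_score < best_score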
4 changes: 1 addition & 3 deletions mmtrack/apis/train.py
@@ -3,6 +3,7 @@
 from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
                          Fp16OptimizerHook, OptimizerHook, build_optimizer)
 from mmcv.utils import build_from_cfg
+from mmdet.datasets import build_dataset
 
 from mmtrack.core import DistEvalHook, EvalHook
 from mmtrack.datasets import build_dataloader
@@ -31,9 +32,6 @@ def train_model(model,
     """
     logger = get_root_logger(cfg.log_level)
 
-    if cfg.get('USE_MMDET', False) or cfg.get('USE_MMCLS', False):
-        from mmdet.datasets import build_dataset
-
     # prepare data loaders
     dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
     if 'imgs_per_gpu' in cfg.data:
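Why the import moved: `build_dataset` was previously imported only inside the `USE_MMDET`/`USE_MMCLS` branch of `train_model`, so a plain VID or SOT run (the "fix a bug for running vid & sot" bullet) that later needed `build_dataset`, e.g. to build the validation dataset for the eval hook, presumably failed because the name was never bound outside that branch. A toy reproduction of that failure mode, with hypothetical names:

# Hypothetical sketch of the conditional-import pitfall fixed above.
def train_model(cfg):
    if cfg.get('USE_MMDET', False):
        from mmdet.datasets import build_dataset  # bound only in this branch

    # With USE_MMDET unset, the next line raises
    # NameError: name 'build_dataset' is not defined.
    return build_dataset(cfg['data']['val'])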
55 changes: 41 additions & 14 deletions mmtrack/core/evaluation/eval_hooks.py
@@ -1,45 +1,72 @@
 import os.path as osp
 
-from mmdet.core import DistEvalHook as _DistEvalHook
-from mmdet.core import EvalHook as _EvalHook
+import torch.distributed as dist
+from mmcv.runner import DistEvalHook as BaseDistEvalHook
+from mmcv.runner import EvalHook as BaseEvalHook
+from torch.nn.modules.batchnorm import _BatchNorm
 
 
-class EvalHook(_EvalHook):
-    """Please refer to `mmdet.core.evaluation.eval_hooks.py:EvalHook` for
-    detailed docstring."""
+class EvalHook(BaseEvalHook):
+    """Please refer to `mmcv.runner.hooks.evaluation.py:EvalHook` for detailed
+    docstring."""
 
-    def after_train_epoch(self, runner):
+    def _do_evaluate(self, runner):
+        """perform evaluation and save ckpt."""
         if not self._should_evaluate(runner):
             return
+
         if hasattr(self.dataloader.dataset,
                    'load_as_video') and self.dataloader.dataset.load_as_video:
            from mmtrack.apis import single_gpu_test
         else:
            from mmdet.apis import single_gpu_test
         results = single_gpu_test(runner.model, self.dataloader, show=False)
-        self.evaluate(runner, results)
+        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+        key_score = self.evaluate(runner, results)
+        if self.save_best:
+            self._save_ckpt(runner, key_score)
 
 
-class DistEvalHook(_DistEvalHook):
-    """Please refer to `mmdet.core.evaluation.eval_hooks.py:DistEvalHook` for
+class DistEvalHook(BaseDistEvalHook):
+    """Please refer to `mmcv.runner.hooks.evaluation.py:DistEvalHook` for
     detailed docstring."""
 
-    def after_train_epoch(self, runner):
+    def _do_evaluate(self, runner):
+        """perform evaluation and save ckpt."""
+        # Synchronization of BatchNorm's buffer (running_mean
+        # and running_var) is not supported in the DDP of pytorch,
+        # which may cause the inconsistent performance of models in
+        # different ranks, so we broadcast BatchNorm's buffers
+        # of rank 0 to other ranks to avoid this.
+        if self.broadcast_bn_buffer:
+            model = runner.model
+            for name, module in model.named_modules():
+                if isinstance(module,
+                              _BatchNorm) and module.track_running_stats:
+                    dist.broadcast(module.running_var, 0)
+                    dist.broadcast(module.running_mean, 0)
+
         if not self._should_evaluate(runner):
             return
+
+        tmpdir = self.tmpdir
+        if tmpdir is None:
+            tmpdir = osp.join(runner.work_dir, '.eval_hook')
+
         if hasattr(self.dataloader.dataset,
                    'load_as_video') and self.dataloader.dataset.load_as_video:
            from mmtrack.apis import multi_gpu_test
         else:
            from mmdet.apis import multi_gpu_test
-        tmpdir = self.tmpdir
-        if tmpdir is None:
-            tmpdir = osp.join(runner.work_dir, '.eval_hook')
         results = multi_gpu_test(
             runner.model,
             self.dataloader,
             tmpdir=tmpdir,
             gpu_collect=self.gpu_collect)
         if runner.rank == 0:
             print('\n')
-            self.evaluate(runner, results)
+            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+            key_score = self.evaluate(runner, results)
+
+            if self.save_best:
+                self._save_ckpt(runner, key_score)
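The rewritten hooks inherit from mmcv's `EvalHook`/`DistEvalHook` instead of mmdet's and override only `_do_evaluate`: the mmcv base classes decide when evaluation is due, carry the `save_best`/`rule` options, and expose `_save_ckpt` for best-checkpoint saving, which is what makes `save_best='success'` in the SOT config work. The mmtrack-specific part is the choice of test function (`mmtrack.apis` for datasets loaded as video, `mmdet.apis` otherwise). A minimal sketch of the same override pattern for a custom hook — hypothetical class and test function, assuming an mmcv version (>=1.3) where `_do_evaluate`, `_should_evaluate` and `_save_ckpt` exist:

import torch
from mmcv.runner import EvalHook as BaseEvalHook


def toy_single_gpu_test(model, data_loader):
    # Hypothetical stand-in for a test loop that returns one result per sample.
    model.eval()
    results = []
    with torch.no_grad():
        for data in data_loader:
            results.extend(model(return_loss=False, **data))
    return results


class ToyEvalHook(BaseEvalHook):
    """Reuse mmcv's scheduling and best-ckpt logic; customize only the test loop."""

    def _do_evaluate(self, runner):
        # The base class calls _do_evaluate from its after_train_epoch/iter hooks;
        # _should_evaluate applies the configured `interval`/`start`.
        if not self._should_evaluate(runner):
            return
        results = toy_single_gpu_test(runner.model, self.dataloader)
        key_score = self.evaluate(runner, results)  # returns the `save_best` metric
        if self.save_best:
            self._save_ckpt(runner, key_score)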
2 changes: 2 additions & 0 deletions tools/train.py
@@ -144,6 +144,8 @@ def main():
             cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
     else:
         model = build_model(cfg.model)
+    if 'detector' in cfg.model:
+        model.detector.init_weights()
 
     datasets = [build_dataset(cfg.data.train)]
     if len(cfg.workflow) == 2:
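Context for the two added lines: trackers in mmtrack (e.g. the VID and SOT models touched by this commit) wrap a `detector` sub-module whose pretrained weights are declared through mmcv's `init_cfg` mechanism, and those weights are only loaded when `init_weights()` is actually called. The explicit `model.detector.init_weights()` after `build_model` keeps training from silently starting with a randomly initialized detector, which is the last bullet of the commit message. A rough sketch of the `init_cfg`/`init_weights` convention — hypothetical module and placeholder checkpoint path:

import torch.nn as nn
from mmcv.runner import BaseModule


class ToyDetector(BaseModule):
    """Hypothetical detector whose init_cfg points at pretrained weights."""

    def __init__(self,
                 init_cfg=dict(type='Pretrained', checkpoint='detector.pth')):
        super().__init__(init_cfg=init_cfg)
        self.backbone = nn.Conv2d(3, 16, 3)


detector = ToyDetector()
# Construction only records init_cfg; the weights above are still random.
detector.init_weights()  # the 'Pretrained' initializer loads 'detector.pth' here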
