`projects/skps/README.md`:

# Simple Keypoints

## Description

Author: @2120140200@mail.nankai.edu.cn

It is a simple keypoint detection model. The model predicts a score heatmap and an encoded location map.
It achieves 3.88 NME on the WFLW test set (see the Results table below).
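
To make the description concrete, here is a minimal decoding sketch. It is illustrative only: the function and argument names are assumptions, not the project's actual `SKPSHeatmap` decoder. The score heatmap locates each keypoint coarsely, and the location map refines the coordinate:

```python
import numpy as np


def decode_keypoints(heatmaps, location_maps, input_size=(256, 256)):
    """Decode (K, H, W) score heatmaps and (K, 2, H, W) location maps.

    Assumes the location map stores a sub-pixel (dx, dy) offset at each
    cell; the real codec's encoding may differ.
    """
    num_kpts, h, w = heatmaps.shape
    stride = (input_size[0] / w, input_size[1] / h)  # heatmap -> input scale
    keypoints = np.zeros((num_kpts, 2), dtype=np.float32)
    scores = np.zeros(num_kpts, dtype=np.float32)
    for k in range(num_kpts):
        # coarse location: argmax of the score heatmap
        y, x = np.unravel_index(np.argmax(heatmaps[k]), (h, w))
        # refinement: offset read from the encoded location map
        dx, dy = location_maps[k, :, y, x]
        keypoints[k] = ((x + dx) * stride[0], (y + dy) * stride[1])
        scores[k] = heatmaps[k, y, x]
    return keypoints, scores
```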

## Usage

### Prerequisites

- Python 3.7
- PyTorch 1.6 or higher
- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher
- [MMPose](https://github.com/open-mmlab/mmpose) v1.0.0rc0 or higher

All the commands below rely on `PYTHONPATH` being configured correctly: it should point to the project directory so that Python can locate the module files. From the project root directory (`projects/skps/`), run the following command to add the current directory to `PYTHONPATH`:

```shell
export PYTHONPATH=`pwd`:$PYTHONPATH
```
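
Since the configs import project-local modules via `custom_imports` (`custom_codecs` and `models`), you can sanity-check the path setup with:

```shell
python -c "import custom_codecs, models"
```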

### Data Preparation

Prepare the WFLW and COFW datasets according to the [instructions](https://mmpose.readthedocs.io/en/dev-1.x/dataset_zoo/2d_face_keypoint.html).
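
For the COFW config included below, the expected layout under `data_root` follows from its `ann_file` and `data_prefix` settings:

```text
data/cofw/
├── annotations/
│   ├── cofw_train.json
│   └── cofw_test.json
└── images/
```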

### Training commands

**To train with a single GPU:**

```shell
mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py
```

**To train with multiple GPUs:**

```shell
mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher pytorch --gpus 8
```

**To train with multiple GPUs via Slurm:**

```shell
mim train mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py --launcher slurm \
--gpus 16 --gpus-per-node 8 --partition $PARTITION
```

### Testing commands

**To test with a single GPU:**

```shell
mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT
```

**To test with multiple GPUs:**

```shell
mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher pytorch --gpus 8
```

**To test with multiple GPUs via Slurm:**

```shell
mim test mmpose configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py -C $CHECKPOINT --launcher slurm \
--gpus 16 --gpus-per-node 8 --partition $PARTITION
```

## Results

WFLW

| Arch | Input Size | NME<sub>*test*</sub> | NME<sub>*pose*</sub> | NME<sub>*illumination*</sub> | NME<sub>*occlusion*</sub> | NME<sub>*blur*</sub> | NME<sub>*makeup*</sub> | NME<sub>*expression*</sub> | ckpt | log |
| :--------- | :--------: | :------------------: | :------------------: | :--------------------------: | :-----------------------: | :------------------: | :--------------------: | :------------------------: | :--------: | :-------: |
| [skps](/configs/td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py) | 256x256 | 3.88 | 6.60 | 3.81 | 4.57 | 4.44 | 3.75 | 4.13 | [ckpt](https://drive.google.com/file/d/10U7f_pp0BPMhm575cO7Vg13nZVCy-PfN/view?usp=sharing) | [log](https://drive.google.com/file/d/1fBNcAyMdRr9nTN8wHqvYPnd9AFBinOk4/view?usp=sharing) |

COFW

| Arch | Input Size | NME | ckpt | log |
| :------------------------------------------------------------- | :--------: | :--: | :------------------------------------------------------------: | :------------------------------------------------------------: |
| [skps](/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py) | 256x256 | 3.20 | [ckpt](https://drive.google.com/file/d/1fdYQ0ajg11KAdkeLVCi_jPdSTKf9i3hP/view?usp=sharing) | [log](https://drive.google.com/file/d/1IUjIY_sLbO6YO59G7XZGUHDVC-o_k09y/view?usp=sharing) |
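
For reference, NME here is conventionally the mean point-to-point error normalized by a reference distance $d$ (the inter-ocular distance for these face benchmarks, matching `norm_mode='keypoint_distance'` in the config below), reported as a percentage:

$$
\mathrm{NME} = \frac{100}{N} \sum_{i=1}^{N} \frac{\lVert \hat{p}_i - p_i \rVert_2}{d}
$$

where $p_i$ and $\hat{p}_i$ are the ground-truth and predicted landmarks and $N$ is the number of landmarks.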

The COFW config referenced in the table above, `projects/skps/configs/td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py`:

```python
custom_imports = dict(imports=['custom_codecs', 'models'])

_base_ = ['mmpose::_base_/default_runtime.py']

# runtime
train_cfg = dict(max_epochs=160, val_interval=1)

# optimizer
optim_wrapper = dict(
    optimizer=dict(type='AdamW', lr=2e-3, weight_decay=0.0005))

# learning policy
param_scheduler = [
    dict(
        type='LinearLR', begin=0, end=500, start_factor=0.001,
        by_epoch=False),  # warm-up
    dict(
        type='MultiStepLR',
        begin=0,
        end=160,
        milestones=[80, 120],
        gamma=0.1,
        by_epoch=True)
]

# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)
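# Note: this only applies when auto LR scaling is enabled at launch;
# MMEngine then rescales lr by actual_batch_size / base_batch_size
# (e.g. a single GPU at batch_size=16 trains at 2e-3 * 16 / 512).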

# hooks
default_hooks = dict(checkpoint=dict(save_best='NME', rule='less', interval=1))

# codec settings
codec = dict(
    type='SKPSHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
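# The codec generates the training targets described in the README: a 64x64
# score heatmap plus an encoded location map, at stride 4 (256 / 64) relative
# to the network input.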

# model settings
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(18, 36)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(18, 36, 72)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(18, 36, 72, 144),
                multiscale_output=True),
            upsample=dict(mode='bilinear', align_corners=False)),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18'),
    ),
    neck=dict(
        type='FeatureMapProcessor',
        concat=True,
    ),
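    # The neck concatenates all four HRNet branches, so the head sees
    # 18 + 36 + 72 + 144 = 270 input channels; out_channels=29 matches
    # COFW's 29 facial landmarks.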
    head=dict(
        type='SKPSHead',
        in_channels=270,
        out_channels=29,
        conv_out_channels=(270, ),
        conv_kernel_sizes=(1, ),
        heatmap_loss=dict(type='AdaptiveWingLoss', use_target_weight=True),
        offside_loss=dict(type='AdaptiveWingLoss', use_target_weight=True),
        decoder=codec),
    test_cfg=dict(
        flip_test=True,
        flip_mode='heatmap',
        shift_heatmap=True,
    ))
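# flip_test averages the heatmaps of the original and horizontally flipped
# image at test time; shift_heatmap compensates for the one-pixel shift
# that flipping introduces in the heatmap grid.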

# base dataset settings
dataset_type = 'COFWDataset'
data_mode = 'topdown'
data_root = 'data/cofw/'

# pipelines
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale', padding=1),
    dict(type='RandomFlip', direction='horizontal'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='RandomBrightnessContrast', p=0.5),
            dict(type='HueSaturationValue', p=0.5),
            dict(type='GaussianBlur', p=0.5),
            dict(type='GaussNoise', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=8,
                max_height=0.2,
                max_width=0.2,
                min_holes=1,
                min_height=0.1,
                min_width=0.1,
                p=0.5),
        ]),
    dict(
        type='RandomBBoxTransform',
        shift_prob=0.,
        rotate_factor=45,
        scale_factor=(0.75, 1.25),
        scale_prob=0),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
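# Note the ordering: photometric augmentation (Albumentation) precedes the
# geometric warp (RandomBBoxTransform + TopdownAffine), and targets are
# generated last so the heatmaps match the augmented image.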
val_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale', padding=1),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]

# data loaders
train_dataloader = dict(
    batch_size=16,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/cofw_train.json',
        data_prefix=dict(img='images/'),
        pipeline=train_pipeline,
    ))
val_dataloader = dict(
    batch_size=32,
    num_workers=4,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/cofw_test.json',
        data_prefix=dict(img='images/'),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader

# evaluators
val_evaluator = dict(
    type='NME',
    norm_mode='keypoint_distance',
)
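# 'keypoint_distance' normalizes the error by a distance defined in the
# dataset meta info (the inter-ocular distance for these face datasets).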
test_evaluator = val_evaluator
```