Skip to content

Commit

Permalink
[Feature&Doc]Modify ptq pipeline and support lsq (open-mmlab#435)
Browse files Browse the repository at this point in the history
* modify ptq pipeline and support lsq

* use placeholder

* fix lsq && quantloop

* add lsq pytest

* add quant loop pytest

* test lsq observer

* fix bug under pt13

* fix reset_min_max_vals

* fix bugs under pt13

* fix configs

* add get_qconfig_mapping

* delete is_qat, add doc and fix pytest

* delete useless code in custom_tracer

* skip pytest under pt13

* add todo: check freezebn

* fix pytest bugs

* fix pytest

* fix pytest

* fix pytest
  • Loading branch information
HIT-cwh authored and humu789 committed Apr 11, 2023
1 parent af6ed31 commit 42ca543
Show file tree
Hide file tree
Showing 26 changed files with 1,550 additions and 144 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,14 @@
model = dict(
_delete_=True,
type='mmrazor.MMArchitectureQuant',
data_preprocessor=dict(
type='mmcls.ClsDataPreprocessor',
num_classes=1000,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True),
architecture=_base_.model,
float_checkpoint=float_checkpoint,
quantizer=dict(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,14 @@
model = dict(
_delete_=True,
type='mmrazor.MMArchitectureQuant',
data_preprocessor=dict(
type='mmcls.ClsDataPreprocessor',
num_classes=1000,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True),
architecture=_base_.model,
float_checkpoint=float_checkpoint,
quantizer=dict(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,14 @@
model = dict(
_delete_=True,
type='mmrazor.MMArchitectureQuant',
data_preprocessor=dict(
type='mmcls.ClsDataPreprocessor',
num_classes=1000,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True),
architecture=_base_.model,
float_checkpoint=float_checkpoint,
quantizer=dict(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Post-training quantization (PTQ) config: RetinaNet-R50-FPN on COCO,
# quantized via mmrazor's OpenVINO quantizer on top of the mmdet base config.
_base_ = ['mmdet::retinanet/retinanet_r50_fpn_1x_coco.py']

train_dataloader = dict(batch_size=32)

# PTQ replaces the normal test loop: `PTQLoop` feeds `calibrate_steps` batches
# from `calibrate_dataloader` through the model to collect activation
# statistics before evaluation.
test_cfg = dict(
    type='mmrazor.PTQLoop',
    calibrate_dataloader=train_dataloader,
    calibrate_steps=32,
)

# Keep a handle on the float architecture from the base config; it becomes
# the `architecture` of the quantization wrapper below.
retina = _base_.model
# Pretrained float weights used as the starting point for quantization.
float_checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'  # noqa: E501

# Shared qconfig: per-channel symmetric-range int8 weights, per-tensor
# moving-average-observed uint8 activations.
global_qconfig = dict(
    w_observer=dict(type='mmrazor.PerChannelMinMaxObserver'),
    a_observer=dict(type='mmrazor.MovingAverageMinMaxObserver'),
    w_fake_quant=dict(type='mmrazor.FakeQuantize'),
    a_fake_quant=dict(type='mmrazor.FakeQuantize'),
    w_qscheme=dict(
        qdtype='qint8', bit=8, is_symmetry=True, is_symmetric_range=True),
    a_qscheme=dict(qdtype='quint8', bit=8, is_symmetry=True),
)

model = dict(
    _delete_=True,
    _scope_='mmrazor',
    type='MMArchitectureQuant',
    data_preprocessor=dict(
        type='mmdet.DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    architecture=retina,
    float_checkpoint=float_checkpoint,
    quantizer=dict(
        type='mmrazor.OpenVINOQuantizer',
        global_qconfig=global_qconfig,
        tracer=dict(
            type='mmrazor.CustomTracer',
            # These head methods are excluded from symbolic tracing —
            # presumably because their control flow is not FX-traceable;
            # confirm against CustomTracer's docs.
            skipped_methods=[
                'mmdet.models.dense_heads.base_dense_head.BaseDenseHead.predict_by_feat',  # noqa: E501
                'mmdet.models.dense_heads.anchor_head.AnchorHead.loss_by_feat',
            ])))

# DDP wrapper for the quantized architecture.
model_wrapper_cfg = dict(
    type='mmrazor.MMArchitectureQuantDDP',
    broadcast_buffers=False,
    find_unused_parameters=True)
61 changes: 0 additions & 61 deletions configs/quantization/qat/lsq_openvino_resnet18_8xb16_cifar10.py

This file was deleted.

64 changes: 64 additions & 0 deletions configs/quantization/qat/lsq_openvino_resnet18_8xb32_in1k.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# Quantization-aware training (QAT) config: ResNet-18 on ImageNet-1k with
# LSQ (Learned Step Size Quantization), exported for OpenVINO.
_base_ = ['mmcls::resnet/resnet18_8xb32_in1k.py']

# Float architecture from the base config; wrapped by MMArchitectureQuant below.
resnet = _base_.model
# Pretrained float weights used to initialize QAT.
float_checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth'  # noqa: E501

# LSQ observers/fake-quants make the quantization step size learnable:
# per-channel symmetric-range int8 weights, per-tensor uint8 activations.
global_qconfig = dict(
    w_observer=dict(type='mmrazor.LSQPerChannelObserver'),
    a_observer=dict(type='mmrazor.LSQObserver'),
    w_fake_quant=dict(type='mmrazor.LearnableFakeQuantize'),
    a_fake_quant=dict(type='mmrazor.LearnableFakeQuantize'),
    w_qscheme=dict(
        qdtype='qint8', bit=8, is_symmetry=True, is_symmetric_range=True),
    a_qscheme=dict(qdtype='quint8', bit=8, is_symmetry=True),
)

model = dict(
    _delete_=True,
    _scope_='mmrazor',
    type='MMArchitectureQuant',
    data_preprocessor=dict(
        type='mmcls.ClsDataPreprocessor',
        num_classes=1000,
        # RGB format normalization parameters
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        # convert image from BGR to RGB
        to_rgb=True),
    architecture=resnet,
    float_checkpoint=float_checkpoint,
    quantizer=dict(
        type='mmrazor.OpenVINOQuantizer',
        global_qconfig=global_qconfig,
        tracer=dict(
            type='mmrazor.CustomTracer',
            # Head loss/prediction methods are skipped during symbolic
            # tracing — presumably not FX-traceable; confirm against
            # CustomTracer's docs.
            skipped_methods=[
                'mmcls.models.heads.ClsHead._get_loss',
                'mmcls.models.heads.ClsHead._get_predictions'
            ])))

# Low LR for fine-tuning: the model starts from a converged float checkpoint.
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.0001, momentum=0.9, weight_decay=0.0001))

# learning policy
param_scheduler = dict(
    _delete_=True,
    type='CosineAnnealingLR',
    T_max=100,
    by_epoch=True,
    begin=0,
    end=100)

# DDP wrapper for the quantized architecture.
model_wrapper_cfg = dict(
    type='mmrazor.MMArchitectureQuantDDP',
    broadcast_buffers=False,
    find_unused_parameters=True)

# train, val, test setting
# LSQEpochBasedLoop drives QAT for 100 epochs; validation/testing run through
# QATValLoop so the fake-quantized model is what gets evaluated.
train_cfg = dict(
    _delete_=True,
    type='mmrazor.LSQEpochBasedLoop',
    max_epochs=100,
    val_interval=1)
val_cfg = dict(_delete_=True, type='mmrazor.QATValLoop')
test_cfg = val_cfg
10 changes: 6 additions & 4 deletions mmrazor/engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,16 @@
from .optimizers import SeparateOptimWrapperConstructor
from .runner import (AutoSlimGreedySearchLoop, DartsEpochBasedTrainLoop,
DartsIterBasedTrainLoop, EvolutionSearchLoop,
GreedySamplerTrainLoop, PTQLoop, QATEpochBasedLoop,
SelfDistillValLoop, SingleTeacherDistillValLoop,
SlimmableValLoop, SubnetValLoop)
GreedySamplerTrainLoop, LSQEpochBasedLoop, PTQLoop,
QATEpochBasedLoop, QATValLoop, SelfDistillValLoop,
SingleTeacherDistillValLoop, SlimmableValLoop,
SubnetValLoop)

__all__ = [
'SeparateOptimWrapperConstructor', 'DumpSubnetHook',
'SingleTeacherDistillValLoop', 'DartsEpochBasedTrainLoop',
'DartsIterBasedTrainLoop', 'SlimmableValLoop', 'EvolutionSearchLoop',
'GreedySamplerTrainLoop', 'EstimateResourcesHook', 'SelfDistillValLoop',
'AutoSlimGreedySearchLoop', 'SubnetValLoop', 'PTQLoop', 'QATEpochBasedLoop'
'AutoSlimGreedySearchLoop', 'SubnetValLoop', 'PTQLoop',
'QATEpochBasedLoop', 'LSQEpochBasedLoop', 'QATValLoop'
]
5 changes: 3 additions & 2 deletions mmrazor/engine/runner/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
from .distill_val_loop import SelfDistillValLoop, SingleTeacherDistillValLoop
from .evolution_search_loop import EvolutionSearchLoop
from .iteprune_val_loop import ItePruneValLoop
from .quantization_loops import PTQLoop, QATEpochBasedLoop
from .quantization_loops import (LSQEpochBasedLoop, PTQLoop, QATEpochBasedLoop,
QATValLoop)
from .slimmable_val_loop import SlimmableValLoop
from .subnet_sampler_loop import GreedySamplerTrainLoop
from .subnet_val_loop import SubnetValLoop
Expand All @@ -14,5 +15,5 @@
'DartsIterBasedTrainLoop', 'SlimmableValLoop', 'EvolutionSearchLoop',
'GreedySamplerTrainLoop', 'SubnetValLoop', 'SelfDistillValLoop',
'ItePruneValLoop', 'AutoSlimGreedySearchLoop', 'QATEpochBasedLoop',
'PTQLoop'
'PTQLoop', 'LSQEpochBasedLoop', 'QATValLoop'
]
Loading

0 comments on commit 42ca543

Please sign in to comment.