From b49db3a75dc9427723517f6630be9f018dbda8eb Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:08:44 +0800 Subject: [PATCH 1/7] Custom_config for waterpuddles dataset --- ...enetv2_fcn_4x4_512x512_40k_waterpuddles.py | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py diff --git a/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py b/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py new file mode 100644 index 0000000000..3c711c4274 --- /dev/null +++ b/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py @@ -0,0 +1,215 @@ +### Only need to edit this config file to supercede the settings in other config files.s +# import os +### Base config files used within configs/_base_ +_base_ = [ + '../_base_/models/bisenetv2.py', # model settings + '../_base_/datasets/waterpuddles.py', # dataset settings + '../_base_/schedules/schedule_40k.py', # scheduler settings + '../_base_/default_runtime.py' # other runtime settings +] +# base_workdir = "/home/mind02/mmsegmentation/train_runs" +# current_workdir = "waterpuddles" +# new_workdir = os.path.join(base_workdir,current_workdir) +# if os.path.exists(new_workdir): + # for i in range(100): + # temp_workdir = os.path.join(new_workdir, str(i)) + # if os.path.exists(temp_workdir): + # continue + # else: + # work_dir = temp_workdir +work_dir = "train_runs/puddle_1000_chasedb" +### From configs/_base_/models configs +# model settings +# norm_cfg = dict(type='BN', requires_grad=True) # Segmentation usually uses SyncBN for multiple GPU training, BN if using 1 GPU +# model = dict( +# type='EncoderDecoder', # Name of segmentor +# pretrained=None, # The ImageNet pretrained backbone to be loaded +# backbone=dict( +# type='BiSeNetV2', +# detail_channels=(64, 64, 128), +# semantic_channels=(16, 32, 64, 128), +# semantic_expansion_ratio=6, +# bga_channels=128, +# out_indices=(0, 1, 2, 3, 4), +# init_cfg=None, +# align_corners=False), +# decode_head=dict( +# type='FCNHead', # Type of decode head. Please refer to mmseg/models/decode_heads for available options. +# in_channels=128, # Input channel of decode head. +# in_index=0, # The index of feature map to select. +# channels=1024, # The intermediate channels of decode head. +# num_convs=1, +# concat_input=False, +# dropout_ratio=0.1, # The dropout ratio before final classification layer. +# num_classes=1, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. +# norm_cfg=norm_cfg, # The configuration of norm layer. +# align_corners=False, # The align_corners argument for resize in decoding. +# loss_decode=dict( # Config of loss function for the decode_head. +# type='CrossEntropyLoss', # Type of loss used for segmentation. +# use_sigmoid=False, # Whether use sigmoid activation for segmentation. +# loss_weight=1.0)), # Loss weight of decode head. +# auxiliary_head=[dict( +# type='FCNHead', # Type of auxiliary head. Please refer to mmseg/models/decode_heads for available options. +# in_channels=16, # Input channel of auxiliary head. +# channels=16, # The intermediate channels of decode head. +# num_convs=2, # Number of convs in FCNHead. It is usually 1 in auxiliary head. +# num_classes=1, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. +# in_index=1, # The index of feature map to select. +# norm_cfg=norm_cfg, # The configuration of norm layer. 
+# concat_input=False, # Whether concat output of convs with input before classification layer. +# align_corners=False, # The align_corners argument for resize in decoding. +# loss_decode=dict( # Config of loss function for the decode_head. +# type='CrossEntropyLoss', # Type of loss used for segmentation. +# use_sigmoid=False, # Whether use sigmoid activation for segmentation. +# loss_weight=1.0)), # Loss weight of auxiliary head, which is usually 0.4 of decode head. +# dict( +# type='FCNHead', +# in_channels=32, +# channels=64, +# num_convs=2, +# num_classes=19, +# in_index=2, +# norm_cfg=norm_cfg, +# concat_input=False, +# align_corners=False, +# loss_decode=dict( +# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +# dict( +# type='FCNHead', +# in_channels=64, +# channels=256, +# num_convs=2, +# num_classes=19, +# in_index=3, +# norm_cfg=norm_cfg, +# concat_input=False, +# align_corners=False, +# loss_decode=dict( +# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +# dict( +# type='FCNHead', +# in_channels=128, +# channels=1024, +# num_convs=2, +# num_classes=19, +# in_index=4, +# norm_cfg=norm_cfg, +# concat_input=False, +# align_corners=False, +# loss_decode=dict( +# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) +# ] +# ) +# # model training and testing settings +# train_cfg=dict() +# test_cfg = dict(mode='whole') # The test mode, options are 'whole' and 'sliding'. 'whole': whole image fully-convolutional test. 'sliding': sliding crop window on the image. +# ############################################################### + +# ### From configs/_base_/datasets configs +# dataset_type = 'waterpuddlesDataset' +# data_root = 'data/waterpuddles' + +# img_norm_cfg = dict( +# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# img_scale = (960, 999) +# crop_size = (128, 128) +# train_pipeline = [ +# dict(type='LoadImageFromFile'), +# dict(type='LoadAnnotations'), +# dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), +# dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), +# dict(type='RandomFlip', prob=0.5), +# dict(type='PhotoMetricDistortion'), +# dict(type='Normalize', **img_norm_cfg), +# dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), +# dict(type='DefaultFormatBundle'), +# dict(type='Collect', keys=['img', 'gt_semantic_seg']) +# ] +# test_pipeline = [ +# dict(type='LoadImageFromFile'), +# dict( +# type='MultiScaleFlipAug', +# img_scale=img_scale, +# # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], +# flip=False, +# transforms=[ +# dict(type='Resize', keep_ratio=True), +# dict(type='RandomFlip'), +# dict(type='Normalize', **img_norm_cfg), +# dict(type='ImageToTensor', keys=['img']), +# dict(type='Collect', keys=['img']) +# ]) +# ] +# data = dict( +# samples_per_gpu=4, +# workers_per_gpu=4, +# train=dict( +# type='RepeatDataset', +# times=40000, +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# img_dir='images/training', +# ann_dir='annotations/training', +# pipeline=train_pipeline)), +# val=dict( +# type=dataset_type, +# data_root=data_root, +# img_dir='images/validation', +# ann_dir='annotations/validation', +# pipeline=test_pipeline), +# test=dict( +# type=dataset_type, +# data_root=data_root, +# img_dir='images/validation', +# ann_dir='annotations/validation', +# pipeline=test_pipeline)) +# ################################################# + +# ### From From configs/_base_/schedules/schedule_{chosen epochs}.py +# # chosen epochs can be 20k, 40k, 80k, 160k, 
320k + +# # optimizer +# optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch +# type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details +# lr=0.01, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch +# momentum=0.9, # Momentum +# weight_decay=0.0005) # Weight decay of SGD +# optimizer_config = dict() # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. + +# # learning rate policy +# lr_config = dict( +# policy='poly', # The policy of scheduler, also support Step, CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. +# power=0.9, # The power of polynomial decay. +# min_lr=0.0001, # The minimum learning rate to stable the training. +# by_epoch=False) # Whether count by epoch or not. + +# # runtime settings +# runner = dict( + # type='IterBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) + # max_iters=160000) # Total number of iterations. For EpochBasedRunner use `max_epochs` +# checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. + # by_epoch=False, # Whether count by epoch or not. + # interval=4000) # The save interval. +# evaluation = dict( # The config to build the evaluation hook. Please refer to mmseg/core/evaluation/eval_hook.py for details. + # interval=16000, # The interval of evaluation. + # metric='mIoU', # The evaluation metric. + # pre_eval=True) +# ##################################################### + +# ### From configs/_base_/default_runtime.py +# # yapf:disable +# log_config = dict( # config to register logger hook +# interval=50, # Interval to print the log +# hooks=[ +# # dict(type='TensorboardLoggerHook') # The Tensorboard logger is also supported +# dict(type='TextLoggerHook', by_epoch=False) +# ]) +# # yapf:enable +# dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. +# log_level = 'INFO' # The level of logging. +# load_from = None # load models as a pre-trained model from a given path. This will not resume training. +# resume_from = None # Resume checkpoints from a given path, the training will be resumed from the iteration when the checkpoint's is saved. +# workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 40000 iterations according to the `runner.max_iters`. +# cudnn_benchmark = True # Whether use cudnn_benchmark to speed up, which is fast for fixed input size. 
+# #################################################### From eb0a1ca233a4aa29f80011015c675da6273669d9 Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:11:42 +0800 Subject: [PATCH 2/7] waterpuddles dataset base config --- configs/_base_/datasets/waterpuddles.py | 59 +++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 configs/_base_/datasets/waterpuddles.py diff --git a/configs/_base_/datasets/waterpuddles.py b/configs/_base_/datasets/waterpuddles.py new file mode 100644 index 0000000000..7828a5b465 --- /dev/null +++ b/configs/_base_/datasets/waterpuddles.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'waterpuddlesDataset' +data_root = 'data/puddle-1000_chasedb_NEW' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=False) +img_scale = (360, 640) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) From d197cdc5d3c443f331bad47697d87dff1d4248ac Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:12:50 +0800 Subject: [PATCH 3/7] Update Bisenetv2 base config for waterpuddles --- configs/_base_/models/bisenetv2.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/configs/_base_/models/bisenetv2.py b/configs/_base_/models/bisenetv2.py index f8fffeecad..e244ba0414 100644 --- a/configs/_base_/models/bisenetv2.py +++ b/configs/_base_/models/bisenetv2.py @@ -20,7 +20,7 @@ num_convs=1, concat_input=False, dropout_ratio=0.1, - num_classes=19, + num_classes=2, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( @@ -31,7 +31,7 @@ in_channels=16, channels=16, num_convs=2, - num_classes=19, + num_classes=2, in_index=1, norm_cfg=norm_cfg, concat_input=False, @@ -43,7 +43,7 @@ in_channels=32, channels=64, num_convs=2, - num_classes=19, + num_classes=2, in_index=2, norm_cfg=norm_cfg, concat_input=False, @@ -55,7 +55,7 @@ in_channels=64, channels=256, num_convs=2, - num_classes=19, + num_classes=2, in_index=3, norm_cfg=norm_cfg, concat_input=False, @@ -67,7 +67,7 @@ 
in_channels=128, channels=1024, num_convs=2, - num_classes=19, + num_classes=2, in_index=4, norm_cfg=norm_cfg, concat_input=False,
From 38ace107646ddd074dad67247d6df043dcf4cef3 Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:13:33 +0800 Subject: [PATCH 4/7] Halve checkpoint and evaluation intervals in schedule_40k --- configs/_base_/schedules/schedule_40k.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/configs/_base_/schedules/schedule_40k.py b/configs/_base_/schedules/schedule_40k.py index d2c5023259..251142f484 100644 --- a/configs/_base_/schedules/schedule_40k.py +++ b/configs/_base_/schedules/schedule_40k.py @@ -5,5 +5,5 @@ lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) # runtime settings runner = dict(type='IterBasedRunner', max_iters=40000) -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU', pre_eval=True) +checkpoint_config = dict(by_epoch=False, interval=2000) +evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)
From d286b90a01c63db6f5ea38652cab3655fe6ae4e4 Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:15:53 +0800 Subject: [PATCH 5/7] Custom Dataset Class --- mmseg/datasets/waterpuddles.py | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 mmseg/datasets/waterpuddles.py
diff --git a/mmseg/datasets/waterpuddles.py b/mmseg/datasets/waterpuddles.py new file mode 100644 index 0000000000..813974cb18 --- /dev/null +++ b/mmseg/datasets/waterpuddles.py @@ -0,0 +1,27 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class waterpuddlesDataset(CustomDataset): + """Water puddles dataset. + + In segmentation map annotation for the water puddles dataset, 0 stands for + background, which is included in the 2 categories. ``reduce_zero_label`` is + fixed to False. Both ``img_suffix`` and ``seg_map_suffix`` are fixed to + '.png'.
+ """ + + CLASSES = ('background','waterpuddle') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(waterpuddlesDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) From 4d4cd97fd5afa842eb6b247efef2cf92a66ee898 Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:17:07 +0800 Subject: [PATCH 6/7] Update class names --- mmseg/core/evaluation/class_names.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mmseg/core/evaluation/class_names.py b/mmseg/core/evaluation/class_names.py index e3bff62314..e2e93698b8 100644 --- a/mmseg/core/evaluation/class_names.py +++ b/mmseg/core/evaluation/class_names.py @@ -120,10 +120,11 @@ def isaid_classes(): 'Soccer_ball_field', 'plane', 'Harbor' ] - -def stare_classes(): - """stare class names for external use.""" - return ['background', 'vessel'] +def waterpuddles_classes(): + """waterpuddles class names for external use.""" + return [ + 'background', 'waterpuddle' + ] def cityscapes_palette(): @@ -259,9 +260,8 @@ def isaid_palette(): [0, 0, 191], [0, 0, 255], [0, 191, 127], [0, 127, 191], [0, 127, 255], [0, 100, 155]] - -def stare_palette(): - """STARE palette for external use.""" +def waterpuddles_palette(): + """waterpuddles palette for external use.""" return [[120, 120, 120], [6, 230, 230]] @@ -278,7 +278,7 @@ def stare_palette(): 'coco_stuff164k' ], 'isaid': ['isaid', 'iSAID'], - 'stare': ['stare', 'STARE'] + 'waterpuddles': ['waterpuddles', 'waterpuddle'] } From c7a8e4b1b9a4ba94092889874a8f0e20ffef4f78 Mon Sep 17 00:00:00 2001 From: JJ Lim | Eugene <79782049+JJLimmm@users.noreply.github.com> Date: Tue, 29 Mar 2022 15:04:00 +0800 Subject: [PATCH 7/7] Update bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py --- ...enetv2_fcn_4x4_512x512_40k_waterpuddles.py | 209 ------------------ 1 file changed, 209 deletions(-) diff --git a/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py b/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py index 3c711c4274..2e3be52d0b 100644 --- a/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py +++ b/configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py @@ -1,215 +1,6 @@ -### Only need to edit this config file to supercede the settings in other config files.s -# import os -### Base config files used within configs/_base_ _base_ = [ '../_base_/models/bisenetv2.py', # model settings '../_base_/datasets/waterpuddles.py', # dataset settings '../_base_/schedules/schedule_40k.py', # scheduler settings '../_base_/default_runtime.py' # other runtime settings ] -# base_workdir = "/home/mind02/mmsegmentation/train_runs" -# current_workdir = "waterpuddles" -# new_workdir = os.path.join(base_workdir,current_workdir) -# if os.path.exists(new_workdir): - # for i in range(100): - # temp_workdir = os.path.join(new_workdir, str(i)) - # if os.path.exists(temp_workdir): - # continue - # else: - # work_dir = temp_workdir -work_dir = "train_runs/puddle_1000_chasedb" -### From configs/_base_/models configs -# model settings -# norm_cfg = dict(type='BN', requires_grad=True) # Segmentation usually uses SyncBN for multiple GPU training, BN if using 1 GPU -# model = dict( -# type='EncoderDecoder', # Name of segmentor -# pretrained=None, # The ImageNet pretrained backbone to be loaded -# backbone=dict( -# type='BiSeNetV2', -# detail_channels=(64, 64, 128), -# semantic_channels=(16, 32, 
64, 128), -# semantic_expansion_ratio=6, -# bga_channels=128, -# out_indices=(0, 1, 2, 3, 4), -# init_cfg=None, -# align_corners=False), -# decode_head=dict( -# type='FCNHead', # Type of decode head. Please refer to mmseg/models/decode_heads for available options. -# in_channels=128, # Input channel of decode head. -# in_index=0, # The index of feature map to select. -# channels=1024, # The intermediate channels of decode head. -# num_convs=1, -# concat_input=False, -# dropout_ratio=0.1, # The dropout ratio before final classification layer. -# num_classes=1, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. -# norm_cfg=norm_cfg, # The configuration of norm layer. -# align_corners=False, # The align_corners argument for resize in decoding. -# loss_decode=dict( # Config of loss function for the decode_head. -# type='CrossEntropyLoss', # Type of loss used for segmentation. -# use_sigmoid=False, # Whether use sigmoid activation for segmentation. -# loss_weight=1.0)), # Loss weight of decode head. -# auxiliary_head=[dict( -# type='FCNHead', # Type of auxiliary head. Please refer to mmseg/models/decode_heads for available options. -# in_channels=16, # Input channel of auxiliary head. -# channels=16, # The intermediate channels of decode head. -# num_convs=2, # Number of convs in FCNHead. It is usually 1 in auxiliary head. -# num_classes=1, # Number of segmentation class. Usually 19 for cityscapes, 21 for VOC, 150 for ADE20k. -# in_index=1, # The index of feature map to select. -# norm_cfg=norm_cfg, # The configuration of norm layer. -# concat_input=False, # Whether concat output of convs with input before classification layer. -# align_corners=False, # The align_corners argument for resize in decoding. -# loss_decode=dict( # Config of loss function for the decode_head. -# type='CrossEntropyLoss', # Type of loss used for segmentation. -# use_sigmoid=False, # Whether use sigmoid activation for segmentation. -# loss_weight=1.0)), # Loss weight of auxiliary head, which is usually 0.4 of decode head. -# dict( -# type='FCNHead', -# in_channels=32, -# channels=64, -# num_convs=2, -# num_classes=19, -# in_index=2, -# norm_cfg=norm_cfg, -# concat_input=False, -# align_corners=False, -# loss_decode=dict( -# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -# dict( -# type='FCNHead', -# in_channels=64, -# channels=256, -# num_convs=2, -# num_classes=19, -# in_index=3, -# norm_cfg=norm_cfg, -# concat_input=False, -# align_corners=False, -# loss_decode=dict( -# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), -# dict( -# type='FCNHead', -# in_channels=128, -# channels=1024, -# num_convs=2, -# num_classes=19, -# in_index=4, -# norm_cfg=norm_cfg, -# concat_input=False, -# align_corners=False, -# loss_decode=dict( -# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) -# ] -# ) -# # model training and testing settings -# train_cfg=dict() -# test_cfg = dict(mode='whole') # The test mode, options are 'whole' and 'sliding'. 'whole': whole image fully-convolutional test. 'sliding': sliding crop window on the image. 
-# ############################################################### - -# ### From configs/_base_/datasets configs -# dataset_type = 'waterpuddlesDataset' -# data_root = 'data/waterpuddles' - -# img_norm_cfg = dict( -# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# img_scale = (960, 999) -# crop_size = (128, 128) -# train_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict(type='LoadAnnotations'), -# dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), -# dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), -# dict(type='RandomFlip', prob=0.5), -# dict(type='PhotoMetricDistortion'), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), -# dict(type='DefaultFormatBundle'), -# dict(type='Collect', keys=['img', 'gt_semantic_seg']) -# ] -# test_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict( -# type='MultiScaleFlipAug', -# img_scale=img_scale, -# # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], -# flip=False, -# transforms=[ -# dict(type='Resize', keep_ratio=True), -# dict(type='RandomFlip'), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='ImageToTensor', keys=['img']), -# dict(type='Collect', keys=['img']) -# ]) -# ] -# data = dict( -# samples_per_gpu=4, -# workers_per_gpu=4, -# train=dict( -# type='RepeatDataset', -# times=40000, -# dataset=dict( -# type=dataset_type, -# data_root=data_root, -# img_dir='images/training', -# ann_dir='annotations/training', -# pipeline=train_pipeline)), -# val=dict( -# type=dataset_type, -# data_root=data_root, -# img_dir='images/validation', -# ann_dir='annotations/validation', -# pipeline=test_pipeline), -# test=dict( -# type=dataset_type, -# data_root=data_root, -# img_dir='images/validation', -# ann_dir='annotations/validation', -# pipeline=test_pipeline)) -# ################################################# - -# ### From From configs/_base_/schedules/schedule_{chosen epochs}.py -# # chosen epochs can be 20k, 40k, 80k, 160k, 320k - -# # optimizer -# optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch -# type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details -# lr=0.01, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch -# momentum=0.9, # Momentum -# weight_decay=0.0005) # Weight decay of SGD -# optimizer_config = dict() # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. - -# # learning rate policy -# lr_config = dict( -# policy='poly', # The policy of scheduler, also support Step, CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. -# power=0.9, # The power of polynomial decay. -# min_lr=0.0001, # The minimum learning rate to stable the training. -# by_epoch=False) # Whether count by epoch or not. - -# # runtime settings -# runner = dict( - # type='IterBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) - # max_iters=160000) # Total number of iterations. 
For EpochBasedRunner use `max_epochs` -# checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. - # by_epoch=False, # Whether count by epoch or not. - # interval=4000) # The save interval. -# evaluation = dict( # The config to build the evaluation hook. Please refer to mmseg/core/evaluation/eval_hook.py for details. - # interval=16000, # The interval of evaluation. - # metric='mIoU', # The evaluation metric. - # pre_eval=True) -# ##################################################### - -# ### From configs/_base_/default_runtime.py -# # yapf:disable -# log_config = dict( # config to register logger hook -# interval=50, # Interval to print the log -# hooks=[ -# # dict(type='TensorboardLoggerHook') # The Tensorboard logger is also supported -# dict(type='TextLoggerHook', by_epoch=False) -# ]) -# # yapf:enable -# dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. -# log_level = 'INFO' # The level of logging. -# load_from = None # load models as a pre-trained model from a given path. This will not resume training. -# resume_from = None # Resume checkpoints from a given path, the training will be resumed from the iteration when the checkpoint's is saved. -# workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 40000 iterations according to the `runner.max_iters`. -# cudnn_benchmark = True # Whether use cudnn_benchmark to speed up, which is fast for fixed input size. -# ####################################################
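The series above registers waterpuddlesDataset through @DATASETS.register_module() (PATCH 5) and points the training pipeline at data/puddle-1000_chasedb_NEW (PATCH 2). A quick way to confirm that the config, the dataset class and the data tree line up is to build the validation split from the final config. The snippet below is a minimal sketch, not part of the series: it assumes an mmsegmentation 0.x checkout in which mmseg/datasets/__init__.py also imports waterpuddlesDataset (that edit is not shown in these patches) and in which the img_dir/ann_dir folders from PATCH 2 exist on disk.

# Minimal sanity check (a sketch, not part of the patch series). Assumes an
# mmsegmentation 0.x checkout where mmseg/datasets/__init__.py also imports
# waterpuddlesDataset (that edit is not shown above) and where
# data/puddle-1000_chasedb_NEW/ follows the img_dir/ann_dir layout from PATCH 2.
from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile(
    'configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py')

# The train split is wrapped in RepeatDataset(times=40000), so build the
# validation split for a quick check of paths, classes and palette.
val_set = build_dataset(cfg.data.val)
print(len(val_set))     # number of validation images found
print(val_set.CLASSES)  # expected: ('background', 'waterpuddle')
print(val_set.PALETTE)  # expected: [[120, 120, 120], [6, 230, 230]]

If building fails with a registry KeyError for waterpuddlesDataset, the missing import in mmseg/datasets/__init__.py is the usual cause.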
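PATCH 1 also carries a commented-out sketch for auto-numbering work directories that, as written, never breaks out of its loop (so work_dir would end up as the last candidate rather than the first free one) and does nothing when the parent directory is missing. A corrected standalone version might look like the following; the helper name is hypothetical and the base path is taken from that comment block.

# Hypothetical helper reconstructing the commented-out work-dir numbering from
# PATCH 1; it is dead code there, and because its loop never breaks, it would
# pick the last unused index instead of the first.
import os


def next_work_dir(base_workdir='/home/mind02/mmsegmentation/train_runs',
                  current_workdir='waterpuddles', limit=100):
    """Return the first unused numbered run directory, e.g. .../waterpuddles/3."""
    new_workdir = os.path.join(base_workdir, current_workdir)
    for i in range(limit):
        candidate = os.path.join(new_workdir, str(i))
        if not os.path.exists(candidate):
            return candidate
    raise RuntimeError('no free run directory under ' + new_workdir)


if __name__ == '__main__':
    print(next_work_dir())  # pass the result to tools/train.py via --work-dir

Since PATCH 7 strips the hard-coded work_dir from the config, the run directory can instead be supplied on the command line, e.g. python tools/train.py configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py --work-dir train_runs/puddle_1000_chasedb.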
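Once training has produced a checkpoint (the hook from PATCH 4 writes one every 2000 iterations), the same config can be reused for inference through the mmseg.apis helpers. This is a sketch only; the checkpoint and image paths below are hypothetical placeholders.

# Inference sketch (not part of the series). The checkpoint and image paths are
# placeholders; adjust them to the actual work dir and test image.
from mmseg.apis import inference_segmentor, init_segmentor

config_file = 'configs/bisenetv2/bisenetv2_fcn_4x4_512x512_40k_waterpuddles.py'
checkpoint_file = 'train_runs/puddle_1000_chasedb/iter_40000.pth'  # hypothetical
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')

result = inference_segmentor(model, 'demo/puddle_example.png')  # hypothetical image
# result[0] is an (H, W) array of class indices: 0 = background, 1 = waterpuddle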