From 0b3cd1e6b9e4884b7ce67c08ca7dad0fca7bb357 Mon Sep 17 00:00:00 2001
From: Z-Fran <1396925302@qq.com>
Date: Thu, 20 Oct 2022 12:56:30 +0800
Subject: [PATCH] fix edsr

---
 configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py | 6 ++++--
 configs/edsr/edsr_x3c64b16_1xb16-300k_div2k.py | 8 +++++---
 configs/edsr/edsr_x4c64b16_1xb16-300k_div2k.py | 8 +++++---
 3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py b/configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py
index 593753e342..ec5f7f4e09 100644
--- a/configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py
+++ b/configs/edsr/edsr_x2c64b16_1xb16-300k_div2k.py
@@ -73,7 +73,9 @@
 data_root = 'data'
 
 train_dataloader = dict(
-    num_workers=4,
+    num_workers=8,
+    batch_size=16,
+    drop_last=True,
     persistent_workers=False,
     sampler=dict(type='InfiniteSampler', shuffle=True),
     dataset=dict(
@@ -112,7 +114,7 @@
 optim_wrapper = dict(
     constructor='DefaultOptimWrapperConstructor',
     type='OptimWrapper',
-    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.99)))
+    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
 
 # learning policy
 param_scheduler = dict(
diff --git a/configs/edsr/edsr_x3c64b16_1xb16-300k_div2k.py b/configs/edsr/edsr_x3c64b16_1xb16-300k_div2k.py
index be54ae082d..3ac6695d05 100644
--- a/configs/edsr/edsr_x3c64b16_1xb16-300k_div2k.py
+++ b/configs/edsr/edsr_x3c64b16_1xb16-300k_div2k.py
@@ -43,7 +43,7 @@
         channel_order='rgb',
         imdecode_backend='cv2'),
     dict(type='SetValues', dictionary=dict(scale=scale)),
-    dict(type='PairedRandomCrop', gt_patch_size=96),
+    dict(type='PairedRandomCrop', gt_patch_size=144),
     dict(
         type='Flip',
         keys=['img', 'gt'],
@@ -75,7 +75,9 @@
 data_root = 'data'
 
 train_dataloader = dict(
-    num_workers=4,
+    num_workers=8,
+    batch_size=16,
+    drop_last=True,
     persistent_workers=False,
     sampler=dict(type='InfiniteSampler', shuffle=True),
     dataset=dict(
@@ -114,7 +116,7 @@
 optim_wrapper = dict(
     constructor='DefaultOptimWrapperConstructor',
     type='OptimWrapper',
-    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.99)))
+    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
 
 # learning policy
 param_scheduler = dict(
diff --git a/configs/edsr/edsr_x4c64b16_1xb16-300k_div2k.py b/configs/edsr/edsr_x4c64b16_1xb16-300k_div2k.py
index b57c5797d9..1e878cc725 100644
--- a/configs/edsr/edsr_x4c64b16_1xb16-300k_div2k.py
+++ b/configs/edsr/edsr_x4c64b16_1xb16-300k_div2k.py
@@ -43,7 +43,7 @@
         channel_order='rgb',
         imdecode_backend='cv2'),
     dict(type='SetValues', dictionary=dict(scale=scale)),
-    dict(type='PairedRandomCrop', gt_patch_size=96),
+    dict(type='PairedRandomCrop', gt_patch_size=196),
     dict(
         type='Flip',
         keys=['img', 'gt'],
@@ -75,7 +75,9 @@
 data_root = 'data'
 
 train_dataloader = dict(
-    num_workers=4,
+    num_workers=8,
+    batch_size=16,
+    drop_last=True,
     persistent_workers=False,
     sampler=dict(type='InfiniteSampler', shuffle=True),
     dataset=dict(
@@ -114,7 +116,7 @@
 optim_wrapper = dict(
     constructor='DefaultOptimWrapperConstructor',
     type='OptimWrapper',
-    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.99)))
+    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
 
 # learning policy
 param_scheduler = dict(
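
For reference, the sketch below shows what the patched training blocks evaluate to, using the x2 config as the example. It is a minimal excerpt assembled only from the hunks above; the truncated `dataset=dict(` contents and every field the diff does not touch are assumed to remain exactly as in the original config.

# Effective settings in edsr_x2c64b16_1xb16-300k_div2k.py after this patch
# (sketch only; fields the diff does not touch are assumed unchanged).
train_dataloader = dict(
    num_workers=8,    # was 4: more loader processes per GPU
    batch_size=16,    # now explicit, matching the "1xb16" in the file name
    drop_last=True,   # discard the final incomplete batch
    persistent_workers=False,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        # ... dataset fields continue unchanged in the real config ...
    ))

optim_wrapper = dict(
    constructor='DefaultOptimWrapperConstructor',
    type='OptimWrapper',
    # (0.9, 0.999) are Adam's default betas and the values used in the EDSR
    # paper; the old 0.99 gave the second-moment average a much shorter memory.
    optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))

On the crop sizes: PairedRandomCrop cuts a gt_patch_size patch from the GT image and a gt_patch_size // scale patch from the LR input, so the new values keep the LR crop near 48 pixels at every scale (96/2 = 48, 144/3 = 48, 196/4 = 49) instead of letting it shrink as the scale factor grows.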