Skip to content

Commit

Permalink
Linting (#207)
Browse files Browse the repository at this point in the history
* add pylintrc

Signed-off-by: lizz <lizz@sensetime.com>

* python3 style super

Signed-off-by: lizz <lizz@sensetime.com>

* add

Signed-off-by: lizz <lizz@sensetime.com>

* lint

Signed-off-by: lizz <lizz@sensetime.com>

* no (object)

Signed-off-by: lizz <lizz@sensetime.com>

* tiny

Signed-off-by: lizz <lizz@sensetime.com>

* ha

Signed-off-by: lizz <lizz@sensetime.com>

* typos

Signed-off-by: lizz <lizz@sensetime.com>

* typo

Signed-off-by: lizz <lizz@sensetime.com>

* typo

Signed-off-by: lizz <lizz@sensetime.com>

* lint

Signed-off-by: lizz <lizz@sensetime.com>

* lint

Signed-off-by: lizz <lizz@sensetime.com>

* more lint

Signed-off-by: lizz <lizz@sensetime.com>

* Fix out_channels unused bug in EDVRNet

Signed-off-by: lizz <lizz@sensetime.com>

* lint

Signed-off-by: lizz <lizz@sensetime.com>
  • Loading branch information
innerlee authored Feb 25, 2021
1 parent dcc2159 commit 0e5332f
Show file tree
Hide file tree
Showing 110 changed files with 994 additions and 379 deletions.
621 changes: 621 additions & 0 deletions .pylintrc

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions mmedit/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,15 @@


def digit_version(version_str):
    """Convert a version string into a list of integers for comparison.

    A release-candidate component such as ``'3rc1'`` is mapped so that it
    sorts below the corresponding final release: the numeric part is
    decremented by one and the rc number is appended
    (e.g. ``'1.3rc1'`` -> ``[1, 2, 1]``).

    Note: the local accumulator is named ``digit_ver`` (not
    ``digit_version``) to avoid shadowing this function's own name.

    Args:
        version_str (str): Dotted version string, e.g. '1.2.3' or '1.3rc1'.

    Returns:
        list[int]: Integer components suitable for lexicographic comparison.
    """
    digit_ver = []
    for x in version_str.split('.'):
        if x.isdigit():
            digit_ver.append(int(x))
        elif x.find('rc') != -1:
            # 'Nrc M' style component: treat 'NrcM' as one step below
            # release N, with the rc number as a tie-breaker.
            patch_version = x.split('rc')
            digit_ver.append(int(patch_version[0]) - 1)
            digit_ver.append(int(patch_version[1]))
    return digit_ver


mmcv_min_version = digit_version(MMCV_MIN)
Expand Down
55 changes: 28 additions & 27 deletions mmedit/apis/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,21 +162,21 @@ def collect_results_cpu(result_part, size, tmpdir=None):
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results

# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results


def collect_results_gpu(result_part, size):
Expand Down Expand Up @@ -211,15 +211,16 @@ def collect_results_gpu(result_part, size):
# gather all result part
dist.all_gather(part_recv_list, part_send)

if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
if rank != 0:
return None

part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
2 changes: 1 addition & 1 deletion mmedit/core/distributed_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def __init__(self,
broadcast_buffers=False,
find_unused_parameters=False,
**kwargs):
super(DistributedDataParallelWrapper, self).__init__()
super().__init__()
assert len(device_ids) == 1, (
'Currently, DistributedDataParallelWrapper only supports one'
'single CUDA device for each process.'
Expand Down
3 changes: 1 addition & 2 deletions mmedit/core/evaluation/eval_hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,7 @@ def __init__(self,
interval=1,
gpu_collect=False,
**eval_kwargs):
super(DistEvalIterHook, self).__init__(dataloader, interval,
**eval_kwargs)
super().__init__(dataloader, interval, **eval_kwargs)
self.gpu_collect = gpu_collect

def after_train_iter(self, runner):
Expand Down
4 changes: 3 additions & 1 deletion mmedit/core/evaluation/metric_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,9 @@ def gauss_filter(sigma, epsilon=1e-2):
def gauss_gradient(img, sigma):
"""Gaussian gradient.
From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/index.html # noqa
From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/
submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/
index.html
Args:
img (ndarray): Input image.
Expand Down
25 changes: 12 additions & 13 deletions mmedit/core/evaluation/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ def sad(alpha, trimap, pred_alpha):
assert (pred_alpha[trimap == 255] == 255).all()
alpha = alpha.astype(np.float64) / 255
pred_alpha = pred_alpha.astype(np.float64) / 255
sad = np.abs(pred_alpha - alpha).sum() / 1000
return sad
sad_result = np.abs(pred_alpha - alpha).sum() / 1000
return sad_result


def mse(alpha, trimap, pred_alpha):
Expand All @@ -35,10 +35,10 @@ def mse(alpha, trimap, pred_alpha):
pred_alpha = pred_alpha.astype(np.float64) / 255
weight_sum = (trimap == 128).sum()
if weight_sum != 0:
mse = ((pred_alpha - alpha)**2).sum() / weight_sum
mse_result = ((pred_alpha - alpha)**2).sum() / weight_sum
else:
mse = 0
return mse
mse_result = 0
return mse_result


def gradient_error(alpha, trimap, pred_alpha, sigma=1.4):
Expand Down Expand Up @@ -100,7 +100,6 @@ def connectivity(alpha, trimap, pred_alpha, step=0.1):
alpha = alpha.astype(np.float32) / 255
pred_alpha = pred_alpha.astype(np.float32) / 255

height, width = alpha.shape
thresh_steps = np.arange(0, 1 + step, step)
round_down_map = -np.ones_like(alpha)
for i in range(1, len(thresh_steps)):
Expand Down Expand Up @@ -196,10 +195,10 @@ def psnr(img1, img2, crop_border=0, input_order='HWC'):
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, None]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, None]

mse = np.mean((img1 - img2)**2)
if mse == 0:
mse_value = np.mean((img1 - img2)**2)
if mse_value == 0:
return float('inf')
return 20. * np.log10(255. / np.sqrt(mse))
return 20. * np.log10(255. / np.sqrt(mse_value))


def _ssim(img1, img2):
Expand Down Expand Up @@ -280,7 +279,7 @@ def ssim(img1, img2, crop_border=0, input_order='HWC'):
return np.array(ssims).mean()


class L1Evaluation(object):
class L1Evaluation:
"""L1 evaluation metric.
Args:
Expand Down Expand Up @@ -347,8 +346,8 @@ def compute_feature(block):
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = np.roll(block, shifts[i], axis=(0, 1))
for shift in shifts:
shifted_block = np.roll(block, shift, axis=(0, 1))
alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
feat.extend([alpha, mean, beta_l, beta_r])
Expand Down Expand Up @@ -408,7 +407,7 @@ def niqe_core(img,
feat = []
for idx_w in range(num_block_w):
for idx_h in range(num_block_h):
# process ecah block
# process each block
block = img_nomalized[idx_h * block_size_h //
scale:(idx_h + 1) * block_size_h //
scale, idx_w * block_size_w //
Expand Down
2 changes: 1 addition & 1 deletion mmedit/core/mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ def random_irregular_mask(img_shape,
angle = 2 * math.pi - angle
length = length_list[direct_n]
brush_w = brush_width_list[direct_n]
# compute end point accoriding to the random angle
# compute end point according to the random angle
end_x = (start_x + length * np.sin(angle)).astype(np.int32)
end_y = (start_y + length * np.cos(angle)).astype(np.int32)

Expand Down
5 changes: 3 additions & 2 deletions mmedit/core/optimizer/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,12 @@ def build_optimizers(model, cfgs):
for key, cfg in cfgs.items():
if not isinstance(cfg, dict):
is_dict_of_dict = False

if is_dict_of_dict:
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
else:
return build_optimizer(model, cfgs)

return build_optimizer(model, cfgs)
13 changes: 7 additions & 6 deletions mmedit/core/scheduler/lr_updater.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class LinearLrUpdaterHook(LrUpdaterHook):
"""

def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
super(LinearLrUpdaterHook, self).__init__(**kwargs)
super().__init__(**kwargs)
self.target_lr = target_lr
self.start = start
self.interval = interval
Expand All @@ -41,10 +41,11 @@ def get_lr(self, runner, base_lr):
progress = runner.iter
max_progress = runner.max_iters
assert max_progress >= self.start

if max_progress == self.start:
return base_lr
else:
# Before 'start', fix lr; After 'start', linearly update lr.
factor = (max(0, progress - self.start) // self.interval) / (
(max_progress - self.start) // self.interval)
return base_lr + (self.target_lr - base_lr) * factor

# Before 'start', fix lr; After 'start', linearly update lr.
factor = (max(0, progress - self.start) // self.interval) / (
(max_progress - self.start) // self.interval)
return base_lr + (self.target_lr - base_lr) * factor
8 changes: 4 additions & 4 deletions mmedit/datasets/base_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class BaseDataset(Dataset, metaclass=ABCMeta):
"""

def __init__(self, pipeline, test_mode=False):
super(BaseDataset, self).__init__()
super().__init__()
self.test_mode = test_mode
self.pipeline = Compose(pipeline)

Expand Down Expand Up @@ -71,7 +71,7 @@ def __getitem__(self, idx):
Args:
idx (int): Index for getting each item.
"""
if not self.test_mode:
return self.prepare_train_data(idx)
else:
if self.test_mode:
return self.prepare_test_data(idx)

return self.prepare_train_data(idx)
2 changes: 1 addition & 1 deletion mmedit/datasets/base_matting_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ class BaseMattingDataset(BaseDataset):
"""

def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super(BaseMattingDataset, self).__init__(pipeline, test_mode)
super().__init__(pipeline, test_mode)
self.ann_file = str(ann_file)
self.data_prefix = str(data_prefix)
self.data_infos = self.load_annotations()
Expand Down
2 changes: 1 addition & 1 deletion mmedit/datasets/base_sr_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class BaseSRDataset(BaseDataset):
"""

def __init__(self, pipeline, scale, test_mode=False):
super(BaseSRDataset, self).__init__(pipeline, test_mode)
super().__init__(pipeline, test_mode)
self.scale = scale

@staticmethod
Expand Down
2 changes: 1 addition & 1 deletion mmedit/datasets/dataset_wrappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@


@DATASETS.register_module()
class RepeatDataset(object):
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
Expand Down
2 changes: 1 addition & 1 deletion mmedit/datasets/generation_paired_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ class GenerationPairedDataset(BaseGenerationDataset):
"""

def __init__(self, dataroot, pipeline, test_mode=False):
super(GenerationPairedDataset, self).__init__(pipeline, test_mode)
super().__init__(pipeline, test_mode)
phase = 'test' if test_mode else 'train'
self.dataroot = osp.join(str(dataroot), phase)
self.data_infos = self.load_annotations()
Expand Down
2 changes: 1 addition & 1 deletion mmedit/datasets/generation_unpaired_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class GenerationUnpairedDataset(BaseGenerationDataset):
"""

def __init__(self, dataroot, pipeline, test_mode=False):
super(GenerationUnpairedDataset, self).__init__(pipeline, test_mode)
super().__init__(pipeline, test_mode)
phase = 'test' if test_mode else 'train'
self.dataroot_a = osp.join(str(dataroot), phase + 'A')
self.dataroot_b = osp.join(str(dataroot), phase + 'B')
Expand Down
2 changes: 1 addition & 1 deletion mmedit/datasets/img_inpainting_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ class ImgInpaintingDataset(BaseDataset):
"""

def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super(ImgInpaintingDataset, self).__init__(pipeline, test_mode)
super().__init__(pipeline, test_mode)
self.ann_file = str(ann_file)
self.data_prefix = str(data_prefix)
self.data_infos = self.load_annotations()
Expand Down
Loading

0 comments on commit 0e5332f

Please sign in to comment.