Skip to content

Commit 68247e3

Browse files
CastleDream and weichengkuo
authored and committed
[CodeCamp2023-154] Add semantic label to the segmentation visualization results (open-mmlab#3229)
Thanks for your contribution and we appreciate it a lot. The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers. ## Motivation [Add semantic label to the segmentation visualization results 分割可视化结果中加上语义信息 open-mmlab#154](open-mmlab/OpenMMLabCamp#154) corresponding issue: [跑出来结果之后怎么在结果图片上获取各个语意部分的区域信息? open-mmlab#2578](open-mmlab#2578) ## Modification 1. mmseg/apis/inference.py, add withLabels in visualizer.add_datasample call, to indicate whether add semantic label 2. mmseg/visualization/local_visualizer.py, add semantic labels by opencv; modify the demo comment description 3. mmseg/utils/__init__.py, add bdd100k datasets to test local_visualizer.py **Current visualize result** <img width="637" alt="image" src="https://github.com/open-mmlab/mmsegmentation/assets/35064479/6ef6ce02-1d82-46f8-bde9-a1d69ff62df8"> **Add semantic label** <img width="637" alt="image" src="https://github.com/open-mmlab/mmsegmentation/assets/35064479/00716679-b43a-4794-8499-9bfecdb4b78b"> ## Test results **tests/test_visualization/test_local_visualizer.py** test results:(MMSegmentation/tests/data/pseudo_cityscapes_dataset/leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png) <img width="643" alt="image" src="https://github.com/open-mmlab/mmsegmentation/assets/35064479/6792b7d2-2512-4ea9-8500-1a7ed2d5e0dc"> **demo/inference_demo.ipynb** test results: <img width="966" alt="image" src="https://github.com/open-mmlab/mmsegmentation/assets/35064479/dfc0147e-fb1a-490a-b6ff-a8b209352d9b"> ----- ## Drawbacks config opencv thickness according to image size <img width="496" alt="image" src="https://github.com/open-mmlab/mmsegmentation/assets/35064479/0a54d72c-62b1-422c-89ae-69dc753fe0fc"> I have no idea of dealing with label overlapping for the time being
1 parent dd90131 commit 68247e3

File tree

3 files changed

+87
-12
lines changed

3 files changed

+87
-12
lines changed

mmseg/apis/inference.py

+7-1
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,7 @@ def show_result_pyplot(model: BaseSegmentor,
158158
draw_pred: bool = True,
159159
wait_time: float = 0,
160160
show: bool = True,
161+
withLabels: Optional[bool] = True,
161162
save_dir=None,
162163
out_file=None):
163164
"""Visualize the segmentation results on the image.
@@ -177,10 +178,14 @@ def show_result_pyplot(model: BaseSegmentor,
177178
that means "forever". Defaults to 0.
178179
show (bool): Whether to display the drawn image.
179180
Default to True.
181+
withLabels(bool, optional): Add semantic labels in visualization
182+
result, Default to True.
180183
save_dir (str, optional): Save file dir for all storage backends.
181184
If it is None, the backend storage will not save any data.
182185
out_file (str, optional): Path to output file. Default to None.
183186
187+
188+
184189
Returns:
185190
np.ndarray: the drawn image which channel is RGB.
186191
"""
@@ -208,7 +213,8 @@ def show_result_pyplot(model: BaseSegmentor,
208213
draw_pred=draw_pred,
209214
wait_time=wait_time,
210215
out_file=out_file,
211-
show=show)
216+
show=show,
217+
withLabels=withLabels)
212218
vis_img = visualizer.get_image()
213219

214220
return vis_img

mmseg/utils/__init__.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
22
# yapf: disable
3-
from .class_names import (ade_classes, ade_palette, cityscapes_classes,
3+
from .class_names import (ade_classes, ade_palette, bdd100k_classes,
4+
bdd100k_palette, cityscapes_classes,
45
cityscapes_palette, cocostuff_classes,
56
cocostuff_palette, dataset_aliases, get_classes,
67
get_palette, isaid_classes, isaid_palette,
@@ -27,5 +28,6 @@
2728
'cityscapes_palette', 'ade_palette', 'voc_palette', 'cocostuff_palette',
2829
'loveda_palette', 'potsdam_palette', 'vaihingen_palette', 'isaid_palette',
2930
'stare_palette', 'dataset_aliases', 'get_classes', 'get_palette',
30-
'datafrombytes', 'synapse_palette', 'synapse_classes'
31+
'datafrombytes', 'synapse_palette', 'synapse_classes', 'bdd100k_classes',
32+
'bdd100k_palette'
3133
]

mmseg/visualization/local_visualizer.py

+76-9
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
22
from typing import Dict, List, Optional
33

4+
import cv2
45
import mmcv
56
import numpy as np
7+
import torch
68
from mmengine.dist import master_only
79
from mmengine.structures import PixelData
810
from mmengine.visualization import Visualizer
@@ -42,8 +44,8 @@ class SegLocalVisualizer(Visualizer):
4244
>>> import numpy as np
4345
>>> import torch
4446
>>> from mmengine.structures import PixelData
45-
>>> from mmseg.data import SegDataSample
46-
>>> from mmseg.engine.visualization import SegLocalVisualizer
47+
>>> from mmseg.structures import SegDataSample
48+
>>> from mmseg.visualization import SegLocalVisualizer
4749
4850
>>> seg_local_visualizer = SegLocalVisualizer()
4951
>>> image = np.random.randint(0, 256,
@@ -60,7 +62,7 @@ class SegLocalVisualizer(Visualizer):
6062
>>> seg_local_visualizer.add_datasample(
6163
... 'visualizer_example', image,
6264
... gt_seg_data_sample, show=True)
63-
""" # noqa
65+
""" # noqa
6466

6567
def __init__(self,
6668
name: str = 'visualizer',
@@ -76,9 +78,32 @@ def __init__(self,
7678
self.alpha: float = alpha
7779
self.set_dataset_meta(palette, classes, dataset_name)
7880

79-
def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData,
81+
def _get_center_loc(self, mask: np.ndarray) -> np.ndarray:
82+
"""Get semantic seg center coordinate.
83+
84+
Args:
85+
mask: np.ndarray: get from sem_seg
86+
"""
87+
loc = np.argwhere(mask == 1)
88+
89+
loc_sort = np.array(
90+
sorted(loc.tolist(), key=lambda row: (row[0], row[1])))
91+
y_list = loc_sort[:, 0]
92+
unique, indices, counts = np.unique(
93+
y_list, return_index=True, return_counts=True)
94+
y_loc = unique[counts.argmax()]
95+
y_most_freq_loc = loc[loc_sort[:, 0] == y_loc]
96+
center_num = len(y_most_freq_loc) // 2
97+
x = y_most_freq_loc[center_num][1]
98+
y = y_most_freq_loc[center_num][0]
99+
return np.array([x, y])
100+
101+
def _draw_sem_seg(self,
102+
image: np.ndarray,
103+
sem_seg: PixelData,
80104
classes: Optional[List],
81-
palette: Optional[List]) -> np.ndarray:
105+
palette: Optional[List],
106+
withLabels: Optional[bool] = True) -> np.ndarray:
82107
"""Draw semantic seg of GT or prediction.
83108
84109
Args:
@@ -94,6 +119,8 @@ def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData,
94119
palette (list, optional): Input palette for result rendering, which
95120
is a list of color palette responding to the classes.
96121
Defaults to None.
122+
withLabels(bool, optional): Add semantic labels in visualization
123+
result, Default to True.
97124
98125
Returns:
99126
np.ndarray: the drawn image which channel is RGB.
@@ -112,6 +139,43 @@ def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData,
112139
for label, color in zip(labels, colors):
113140
mask[sem_seg[0] == label, :] = color
114141

142+
if withLabels:
143+
font = cv2.FONT_HERSHEY_SIMPLEX
144+
# (0,1] to change the size of the text relative to the image
145+
scale = 0.05
146+
fontScale = min(image.shape[0], image.shape[1]) / (25 / scale)
147+
fontColor = (255, 255, 255)
148+
if image.shape[0] < 300 or image.shape[1] < 300:
149+
thickness = 1
150+
rectangleThickness = 1
151+
else:
152+
thickness = 2
153+
rectangleThickness = 2
154+
lineType = 2
155+
156+
if isinstance(sem_seg[0], torch.Tensor):
157+
masks = sem_seg[0].numpy() == labels[:, None, None]
158+
else:
159+
masks = sem_seg[0] == labels[:, None, None]
160+
masks = masks.astype(np.uint8)
161+
for mask_num in range(len(labels)):
162+
classes_id = labels[mask_num]
163+
classes_color = colors[mask_num]
164+
loc = self._get_center_loc(masks[mask_num])
165+
text = classes[classes_id]
166+
(label_width, label_height), baseline = cv2.getTextSize(
167+
text, font, fontScale, thickness)
168+
mask = cv2.rectangle(mask, loc,
169+
(loc[0] + label_width + baseline,
170+
loc[1] + label_height + baseline),
171+
classes_color, -1)
172+
mask = cv2.rectangle(mask, loc,
173+
(loc[0] + label_width + baseline,
174+
loc[1] + label_height + baseline),
175+
(0, 0, 0), rectangleThickness)
176+
mask = cv2.putText(mask, text, (loc[0], loc[1] + label_height),
177+
font, fontScale, fontColor, thickness,
178+
lineType)
115179
color_seg = (image * (1 - self.alpha) + mask * self.alpha).astype(
116180
np.uint8)
117181
self.set_image(color_seg)
@@ -137,7 +201,7 @@ def set_dataset_meta(self,
137201
visulizer will use the meta information of the dataset i.e.
138202
classes and palette, but the `classes` and `palette` have
139203
higher priority. Defaults to None.
140-
""" # noqa
204+
""" # noqa
141205
# Set default value. When calling
142206
# `SegLocalVisualizer().dataset_meta=xxx`,
143207
# it will override the default value.
@@ -161,7 +225,8 @@ def add_datasample(
161225
wait_time: float = 0,
162226
# TODO: Supported in mmengine's Viusalizer.
163227
out_file: Optional[str] = None,
164-
step: int = 0) -> None:
228+
step: int = 0,
229+
withLabels: Optional[bool] = True) -> None:
165230
"""Draw datasample and save to all backends.
166231
167232
- If GT and prediction are plotted at the same time, they are
@@ -187,6 +252,8 @@ def add_datasample(
187252
wait_time (float): The interval of show (s). Defaults to 0.
188253
out_file (str): Path to output file. Defaults to None.
189254
step (int): Global step value to record. Defaults to 0.
255+
withLabels(bool, optional): Add semantic labels in visualization
256+
result, Defaults to True.
190257
"""
191258
classes = self.dataset_meta.get('classes', None)
192259
palette = self.dataset_meta.get('palette', None)
@@ -202,7 +269,7 @@ def add_datasample(
202269
'segmentation results.'
203270
gt_img_data = self._draw_sem_seg(gt_img_data,
204271
data_sample.gt_sem_seg, classes,
205-
palette)
272+
palette, withLabels)
206273

207274
if (draw_pred and data_sample is not None
208275
and 'pred_sem_seg' in data_sample):
@@ -213,7 +280,7 @@ def add_datasample(
213280
'segmentation results.'
214281
pred_img_data = self._draw_sem_seg(pred_img_data,
215282
data_sample.pred_sem_seg,
216-
classes, palette)
283+
classes, palette, withLabels)
217284

218285
if gt_img_data is not None and pred_img_data is not None:
219286
drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1)

0 commit comments

Comments
 (0)