Skip to content

Commit 4981ff6

Browse files
authored
[Fix] Fix docstring link problem in readthedocs (#845)
* fix docstring link * fix docstring link * fix docstring link * fix docstring link * fix docstring link * fix docstring link
1 parent a7461d9 commit 4981ff6

17 files changed

+52
-32
lines changed

mmseg/datasets/pipelines/formating.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -249,9 +249,9 @@ class Collect(object):
249249
keys (Sequence[str]): Keys of results to be collected in ``data``.
250250
meta_keys (Sequence[str], optional): Meta keys to be converted to
251251
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
252-
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
253-
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
254-
'img_norm_cfg')``
252+
Default: (``filename``, ``ori_filename``, ``ori_shape``,
253+
``img_shape``, ``pad_shape``, ``scale_factor``, ``flip``,
254+
``flip_direction``, ``img_norm_cfg``)
255255
"""
256256

257257
def __init__(self,

mmseg/models/backbones/cgnet.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -187,8 +187,8 @@ def forward(self, x):
187187
class CGNet(BaseModule):
188188
"""CGNet backbone.
189189
190-
A Light-weight Context Guided Network for Semantic Segmentation
191-
arXiv: https://arxiv.org/abs/1811.08201
190+
This backbone is the implementation of `A Light-weight Context Guided
191+
Network for Semantic Segmentation <https://arxiv.org/abs/1811.08201>`_.
192192
193193
Args:
194194
in_channels (int): Number of input image channels. Normally 3.

mmseg/models/backbones/fast_scnn.py

+3
Original file line numberDiff line numberDiff line change
@@ -272,6 +272,9 @@ def forward(self, higher_res_feature, lower_res_feature):
272272
class FastSCNN(BaseModule):
273273
"""Fast-SCNN Backbone.
274274
275+
This backbone is the implementation of `Fast-SCNN: Fast Semantic
276+
Segmentation Network <https://arxiv.org/abs/1902.04502>`_.
277+
275278
Args:
276279
in_channels (int): Number of input image channels. Default: 3.
277280
downsample_dw_channels (tuple[int]): Number of output channels after

mmseg/models/backbones/hrnet.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -218,8 +218,8 @@ def forward(self, x):
218218
class HRNet(BaseModule):
219219
"""HRNet backbone.
220220
221-
`High-Resolution Representations for Labeling Pixels and Regions
222-
arXiv: <https://arxiv.org/abs/1904.04514>`_.
221+
This backbone is the implementation of `High-Resolution Representations
222+
for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_.
223223
224224
Args:
225225
extra (dict): Detailed configuration for each stage of HRNet.

mmseg/models/backbones/mit.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -246,9 +246,9 @@ def forward(self, x, hw_shape):
246246
class MixVisionTransformer(BaseModule):
247247
"""The backbone of Segformer.
248248
249-
A PyTorch implement of : `SegFormer: Simple and Efficient Design for
250-
Semantic Segmentation with Transformers` -
251-
https://arxiv.org/pdf/2105.15203.pdf
249+
This backbone is the implementation of `SegFormer: Simple and
250+
Efficient Design for Semantic Segmentation with
251+
Transformers <https://arxiv.org/abs/2105.15203>`_.
252252
253253
Args:
254254
in_channels (int): Number of input channels. Default: 3.

mmseg/models/backbones/mobilenet_v2.py

+4
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@
1414
class MobileNetV2(BaseModule):
1515
"""MobileNetV2 backbone.
1616
17+
This backbone is the implementation of
18+
`MobileNetV2: Inverted Residuals and Linear Bottlenecks
19+
<https://arxiv.org/abs/1801.04381>`_.
20+
1721
Args:
1822
widen_factor (float): Width multiplier, multiply number of
1923
channels in each layer by this amount. Default: 1.0.

mmseg/models/backbones/resnest.py

+3
Original file line numberDiff line numberDiff line change
@@ -271,6 +271,9 @@ def _inner_forward(x):
271271
class ResNeSt(ResNetV1d):
272272
"""ResNeSt backbone.
273273
274+
This backbone is the implementation of `ResNeSt:
275+
Split-Attention Networks <https://arxiv.org/abs/2004.08955>`_.
276+
274277
Args:
275278
groups (int): Number of groups of Bottleneck. Default: 1
276279
base_width (int): Base width of Bottleneck. Default: 4

mmseg/models/backbones/resnet.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -311,6 +311,9 @@ def _inner_forward(x):
311311
class ResNet(BaseModule):
312312
"""ResNet backbone.
313313
314+
This backbone is the improved implementation of `Deep Residual Learning
315+
for Image Recognition <https://arxiv.org/abs/1512.03385>`_.
316+
314317
Args:
315318
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
316319
in_channels (int): Number of input image channels. Default: 3.
@@ -686,11 +689,10 @@ def train(self, mode=True):
686689
class ResNetV1c(ResNet):
687690
"""ResNetV1c variant described in [1]_.
688691
689-
Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv
690-
in the input stem with three 3x3 convs.
691-
692-
References:
693-
.. [1] https://arxiv.org/pdf/1812.01187.pdf
692+
Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv in
693+
the input stem with three 3x3 convs. For more details please refer to `Bag
694+
of Tricks for Image Classification with Convolutional Neural Networks
695+
<https://arxiv.org/abs/1812.01187>`_.
694696
"""
695697

696698
def __init__(self, **kwargs):

mmseg/models/backbones/resnext.py

+4
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,10 @@ def __init__(self,
8888
class ResNeXt(ResNet):
8989
"""ResNeXt backbone.
9090
91+
This backbone is the implementation of `Aggregated
92+
Residual Transformations for Deep Neural
93+
Networks <https://arxiv.org/abs/1611.05431>`_.
94+
9195
Args:
9296
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
9397
in_channels (int): Number of input image channels. Normally 3.

mmseg/models/backbones/swin.py

+5-6
Original file line numberDiff line numberDiff line change
@@ -522,13 +522,12 @@ def forward(self, x, hw_shape):
522522

523523
@BACKBONES.register_module()
524524
class SwinTransformer(BaseModule):
525-
""" Swin Transformer
526-
A PyTorch implement of : `Swin Transformer:
527-
Hierarchical Vision Transformer using Shifted Windows` -
528-
https://arxiv.org/abs/2103.14030
525+
"""Swin Transformer backbone.
529526
530-
Inspiration from
531-
https://github.com/microsoft/Swin-Transformer
527+
This backbone is the implementation of `Swin Transformer:
528+
Hierarchical Vision Transformer using Shifted
529+
Windows <https://arxiv.org/abs/2103.14030>`_.
530+
Inspiration from https://github.com/microsoft/Swin-Transformer.
532531
533532
Args:
534533
pretrain_img_size (int | tuple[int]): The size of input image when

mmseg/models/backbones/unet.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -224,8 +224,9 @@ def forward(self, x):
224224
@BACKBONES.register_module()
225225
class UNet(BaseModule):
226226
"""UNet backbone.
227-
U-Net: Convolutional Networks for Biomedical Image Segmentation.
228-
https://arxiv.org/pdf/1505.04597.pdf
227+
228+
This backbone is the implementation of `U-Net: Convolutional Networks
229+
for Biomedical Image Segmentation <https://arxiv.org/abs/1505.04597>`_.
229230
230231
Args:
231232
in_channels (int): Number of input image channels. Default: 3.
@@ -277,7 +278,6 @@ class UNet(BaseModule):
277278
The input image size should be divisible by the whole downsample rate
278279
of the encoder. More detail of the whole downsample rate can be found
279280
in UNet._check_input_divisible.
280-
281281
"""
282282

283283
def __init__(self,

mmseg/models/backbones/vit.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -98,9 +98,9 @@ def forward(self, x):
9898
class VisionTransformer(BaseModule):
9999
"""Vision Transformer.
100100
101-
A PyTorch implement of : `An Image is Worth 16x16 Words:
102-
Transformers for Image Recognition at Scale` -
103-
https://arxiv.org/abs/2010.11929
101+
This backbone is the implementation of `An Image is Worth 16x16 Words:
102+
Transformers for Image Recognition at
103+
Scale <https://arxiv.org/abs/2010.11929>`_.
104104
105105
Args:
106106
img_size (int | tuple): Input image size. Default: 224.

mmseg/models/decode_heads/point_head.py

+2
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@ def calculate_uncertainty(seg_logits):
3636
class PointHead(BaseCascadeDecodeHead):
3737
"""A mask point head use in PointRend.
3838
39+
This head is implemented of `PointRend: Image Segmentation as
40+
Rendering <https://arxiv.org/abs/1912.08193>`_.
3941
``PointHead`` use shared multi-layer perceptron (equivalent to
4042
nn.Conv1d) to predict the logit of input points. The fine-grained feature
4143
and coarse feature will be concatenate together for predication.

mmseg/models/decode_heads/sep_fcn_head.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,9 @@ class DepthwiseSeparableFCNHead(FCNHead):
1010
"""Depthwise-Separable Fully Convolutional Network for Semantic
1111
Segmentation.
1212
13-
This head is implemented according to Fast-SCNN paper.
13+
This head is implemented according to `Fast-SCNN: Fast Semantic
14+
Segmentation Network <https://arxiv.org/abs/1902.04502>`_.
15+
1416
Args:
1517
in_channels(int): Number of output channels of FFM.
1618
channels(int): Number of middle-stage channels in the decode head.

mmseg/models/necks/fpn.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@
1212
class FPN(BaseModule):
1313
"""Feature Pyramid Network.
1414
15-
This is an implementation of - Feature Pyramid Networks for Object
16-
Detection (https://arxiv.org/abs/1612.03144)
15+
This neck is the implementation of `Feature Pyramid Networks for Object
16+
Detection <https://arxiv.org/abs/1612.03144>`_.
1717
1818
Args:
1919
in_channels (List[int]): Number of input channels per scale.

mmseg/models/necks/mla_neck.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -63,8 +63,8 @@ def forward(self, inputs):
6363
class MLANeck(nn.Module):
6464
"""Multi-level Feature Aggregation.
6565
66-
The Multi-level Feature Aggregation construction of SETR:
67-
https://arxiv.org/pdf/2012.15840.pdf
66+
This neck is `The Multi-level Feature Aggregation construction of
67+
SETR <https://arxiv.org/abs/2012.15840>`_.
6868
6969
7070
Args:

mmseg/models/necks/multilevel_neck.py

+1
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ class MultiLevelNeck(nn.Module):
1111
"""MultiLevelNeck.
1212
1313
A neck structure connect vit backbone and decoder_heads.
14+
1415
Args:
1516
in_channels (List[int]): Number of input channels per scale.
1617
out_channels (int): Number of output channels (used at each scale).

0 commit comments

Comments (0)