Refine docs and fix format issues #3349

Merged (5 commits) on Jul 20, 2020
2 changes: 1 addition & 1 deletion .dev_scripts/linter.sh
@@ -1,3 +1,3 @@
yapf -r -i --style .style.yapf mmdet/ configs/ tests/ tools/
yapf -r -i mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .
2 changes: 1 addition & 1 deletion .github/CONTRIBUTING.md
@@ -27,7 +27,7 @@ We use the following tools for linting and formatting:
- [yapf](https://github.com/google/yapf): formatter
- [isort](https://github.com/timothycrosley/isort): sort imports

Style configurations of yapf and isort can be found in [.style.yapf](../.style.yapf) and [.isort.cfg](../.isort.cfg).
Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg).

We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`,
fixes `end-of-files`, sorts `requirements.txt` automatically on every commit.
4 changes: 0 additions & 4 deletions .style.yapf

This file was deleted.

5 changes: 1 addition & 4 deletions docs/conf.py
@@ -39,10 +39,7 @@
]

autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version',
'mmdet.ops.corner_pool', 'mmdet.ops.dcn', 'mmdet.ops.masked_conv',
'mmdet.ops.nms', 'mmdet.ops.roi_align', 'mmdet.ops.roi_pool',
'mmdet.ops.sigmoid_focal_loss', 'mmdet.ops.carafe', 'mmdet.ops.utils'
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]

# Add any paths that contain templates here, relative to this directory.
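The `autodoc_mock_imports` list above lets Sphinx import `mmdet` modules for API documentation without the heavy runtime dependencies installed. A rough, hedged sketch of the idea (module name taken from the list above; the real mechanism lives in `sphinx.ext.autodoc.mock`):

```python
import sys
from unittest import mock

# Registering a stand-in module object is enough for a later
# "from mmcv.ops import nms" to resolve without the real package.
sys.modules.setdefault('mmcv.ops', mock.MagicMock())

from mmcv.ops import nms  # resolves to a mock attribute, no ImportError
print(nms)                # e.g. <MagicMock name='mock.nms' ...>
```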
14 changes: 10 additions & 4 deletions docs/index.rst
@@ -8,13 +8,19 @@ Welcome to MMDetection's documentation!
getting_started.md
config.md
model_zoo.md
tutorials/finetune.md
tutorials/new_dataset.md
tutorials/data_pipeline.md
tutorials/new_modules.md
compatibility.md
changelog.md
projects.md

.. toctree::
:maxdepth: 2
:caption: Tutorials

tutorials/index.rst

.. toctree::
:caption: API Reference

api.rst


7 changes: 7 additions & 0 deletions docs/tutorials/index.rst
@@ -0,0 +1,7 @@
.. toctree::
:maxdepth: 2

finetune.md
new_dataset.md
data_pipeline.md
new_modules.md
29 changes: 15 additions & 14 deletions mmdet/core/anchor/anchor_generator.py
@@ -123,7 +123,7 @@ def gen_base_anchors(self):
"""Generate base anchors.

Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
@@ -155,7 +155,7 @@ def gen_single_level_base_anchors(self,
related to a single feature grid. Defaults to None.

Returns:
torch.Tensor: Anchors in a single-level feature maps
torch.Tensor: Anchors in a single-level feature maps.
"""
w = base_size
h = base_size
@@ -212,10 +212,10 @@ def grid_anchors(self, featmap_sizes, device='cuda'):
device (str): Device where the anchors will be put on.

Return:
list[torch.Tensor]: Anchors in multiple feature levels.
The sizes of each tensor should be [N, 4], where
N = width * height * num_base_anchors, width and height
are the sizes of the corresponding feature level,
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
assert self.num_levels == len(featmap_sizes)
@@ -308,7 +308,7 @@ def single_level_valid_flags(self,
Defaults to 'cuda'.

Returns:
torch.Tensor: The valid flags of each anchor in a single level
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
@@ -433,7 +433,7 @@ def gen_base_anchors(self):
"""Generate base anchors.

Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
@@ -471,12 +471,13 @@ def __repr__(self):
class LegacyAnchorGenerator(AnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.

Difference to the V2.0 anchor generator:
Note:
Difference to the V2.0 anchor generator:

1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
2. The width/height are minused by 1 when calculating the anchors' centers
and corners to meet the V1.x coordinate system.
3. The anchors' corners are quantized.
1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
2. The width/height are minused by 1 when calculating the anchors' \
centers and corners to meet the V1.x coordinate system.
3. The anchors' corners are quantized.

Args:
strides (list[int] | list[tuple[int]]): Strides of anchors
@@ -523,7 +524,7 @@ def gen_single_level_base_anchors(self,
"""Generate base anchors of a single level.

Note:
The width/height of anchors are minused by 1 when calculating
The width/height of anchors are minused by 1 when calculating \
the centers and corners to meet the V1.x coordinate system.

Args:
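As a quick sanity check on the shape contract documented in `grid_anchors` above (N = width * height * num_base_anchors), a minimal usage sketch, assuming mmdet 2.x is installed:

```python
from mmdet.core import AnchorGenerator

# One feature level with stride 16, a single ratio/scale and base size 9.
gen = AnchorGenerator(strides=[16], ratios=[1.0], scales=[1.0], base_sizes=[9])

# A 2x2 feature map at that level -> 2 * 2 * 1 = 4 anchors, each as [x1, y1, x2, y2].
anchors = gen.grid_anchors([(2, 2)], device='cpu')
print(anchors[0].shape)  # torch.Size([4, 4])
```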
2 changes: 1 addition & 1 deletion mmdet/core/anchor/utils.py
@@ -31,7 +31,7 @@ def anchor_inside_flags(flat_anchors,
Defaults to 0.

Returns:
torch.Tensor: Flags indicating whether the anchors are inside a
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
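The `anchor_inside_flags` docstring above describes a per-anchor validity test against the padded image shape. A self-contained sketch of that check (it mirrors the idea; mmdet's version also handles a negative `allowed_border`):

```python
import torch

def inside_flags(anchors, img_shape, allowed_border=0):
    """Flag anchors whose corners lie inside the (padded) image."""
    img_h, img_w = img_shape[:2]
    return ((anchors[:, 0] >= -allowed_border) &
            (anchors[:, 1] >= -allowed_border) &
            (anchors[:, 2] < img_w + allowed_border) &
            (anchors[:, 3] < img_h + allowed_border))

anchors = torch.tensor([[0., 0., 10., 10.],    # fully inside a 32x32 image
                        [-5., 0., 10., 10.]])  # sticks out on the left
print(inside_flags(anchors, (32, 32)))  # tensor([ True, False])
```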
35 changes: 20 additions & 15 deletions mmdet/core/bbox/assigners/center_region_assigner.py
@@ -108,7 +108,7 @@ def get_gt_priorities(self, gt_bboxes):
gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).

Returns:
Tensor: The priority of gts so that gts with larger priority is
Tensor: The priority of gts so that gts with larger priority is \
more likely to be assigned. Shape (k, )
"""
gt_areas = bboxes_area(gt_bboxes)
@@ -119,9 +119,10 @@ def get_gt_priorities(self, gt_bboxes):
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to bboxes.

This method assigns gts to every bbox (proposal/anchor), each bbox will
be assigned with -1, or a semi-positive number. -1 means negative
sample, semi-positive number is the index (0-based) of assigned gt.
This method assigns gts to every bbox (proposal/anchor), each bbox \
will be assigned with -1, or a semi-positive number. -1 means \
negative sample, semi-positive number is the index (0-based) of \
assigned gt.

Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
@@ -131,12 +132,13 @@ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).

Returns:
:obj:`AssignResult`: The assigned result. Note that shadowed_labels
of shape (N, 2) is also added as an `assign_result` attribute.
`shadowed_labels` is a tensor composed of N pairs of
[anchor_ind, class_label], where N is the number of anchors that
lie in the outer region of a gt, anchor_ind is the shadowed
anchor index and class_label is the shadowed class label.
:obj:`AssignResult`: The assigned result. Note that \
shadowed_labels of shape (N, 2) is also added as an \
`assign_result` attribute. `shadowed_labels` is a tensor \
composed of N pairs of [anchor_ind, class_label], where N \
is the number of anchors that lie in the outer region of a \
gt, anchor_ind is the shadowed anchor index and class_label \
is the shadowed class label.

Example:
>>> self = CenterRegionAssigner(0.2, 0.2)
@@ -261,11 +263,14 @@ def assign_one_hot_gt_indices(self,
match with multiple gts. Shape: (num_gt, ).

Returns:
assigned_gt_inds: The assigned gt index of each prior bbox
(i.e. index from 1 to num_gts). Shape: (num_prior, ).
shadowed_gt_inds: shadowed gt indices. It is a tensor of shape
(num_ignore, 2) with first column being the shadowed prior bbox
indices and the second column the shadowed gt indices (1-based)
tuple: Returns (assigned_gt_inds, shadowed_gt_inds).

- assigned_gt_inds: The assigned gt index of each prior bbox \
(i.e. index from 1 to num_gts). Shape: (num_prior, ).
- shadowed_gt_inds: shadowed gt indices. It is a tensor of \
shape (num_ignore, 2) with first column being the \
shadowed prior bbox indices and the second column the \
shadowed gt indices (1-based).
"""
num_bboxes, num_gts = is_bbox_in_gt_core.shape

4 changes: 2 additions & 2 deletions mmdet/core/bbox/samplers/sampling_result.py
@@ -97,9 +97,9 @@ def random(cls, rng=None, **kwargs):
kwargs (keyword arguments):
- num_preds: number of predicted boxes
- num_gts: number of true boxes
- p_ignore (float): probability of a predicted box assigned to
- p_ignore (float): probability of a predicted box assigned to \
an ignored truth.
- p_assigned (float): probability of a predicted box not being
- p_assigned (float): probability of a predicted box not being \
assigned.
- p_use_label (float | bool): with labels or not.

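For context, a hedged usage sketch of the keyword arguments listed above (import path and defaults assume mmdet 2.x; the values are illustrative only):

```python
from mmdet.core.bbox.samplers import SamplingResult

# Build a random SamplingResult, as used in tests and doctests.
result = SamplingResult.random(rng=0, num_preds=20, num_gts=3,
                               p_ignore=0.1, p_assigned=0.6, p_use_label=True)
print(result)  # summarises the sampled positive / negative indices
```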
6 changes: 3 additions & 3 deletions mmdet/core/mask/structures.py
@@ -507,9 +507,9 @@ def to_bitmap(self):
def areas(self):
"""Compute areas of masks.

This func is modified from
https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387
Only works with Polygons, using the shoelace formula
This func is modified from `detectron2
<https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
The function only works with Polygons using the shoelace formula.

Return:
ndarray: areas of each instance
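The `areas` docstring above refers to the shoelace formula. A self-contained sketch of that formula for a single polygon stored as a flat `[x0, y0, x1, y1, ...]` array (the layout `PolygonMasks` uses); it illustrates the idea rather than reproducing mmdet's code:

```python
import numpy as np

def polygon_area(poly):
    """Shoelace formula: area enclosed by the polygon's vertices."""
    x, y = poly[0::2], poly[1::2]
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# A 4x3 axis-aligned rectangle -> area 12.0
print(polygon_area(np.array([0., 0., 4., 0., 4., 3., 0., 3.])))
```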
4 changes: 2 additions & 2 deletions mmdet/core/mask/utils.py
@@ -17,8 +17,8 @@ def split_combined_polys(polys, poly_lens, polys_per_mask):
of each mask

Returns:
list: a list (length = image num) of list (length = mask num) of
list (length = poly num) of numpy array
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
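To make the nested return structure above concrete, a small self-contained sketch of the slicing step on toy data (mmdet delegates the actual slicing to a helper such as `mmcv.slice_list`):

```python
def slice_list(in_list, lens):
    """Split a flat list into consecutive chunks of the given lengths."""
    out, idx = [], 0
    for n in lens:
        out.append(in_list[idx:idx + n])
        idx += n
    return out

polys = ['p0', 'p1', 'p2', 'p3']          # all polygons of one image, flattened
polys_per_mask = [1, 3]                    # mask 0 owns 1 polygon, mask 1 owns 3
print(slice_list(polys, polys_per_mask))   # [['p0'], ['p1', 'p2', 'p3']]
```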
2 changes: 1 addition & 1 deletion mmdet/core/post_processing/bbox_nms.py
@@ -23,7 +23,7 @@ def multiclass_nms(multi_bboxes,
applying NMS

Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \
are 0-based.
"""
num_classes = multi_scores.size(1) - 1
8 changes: 4 additions & 4 deletions mmdet/core/utils/misc.py
@@ -37,16 +37,16 @@ def multi_apply(func, *args, **kwargs):

Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.

Args:
func (Function): A function that will be applied to a list of
arguments

Returns:
tuple(list): A tuple containing multiple list, each list contains
tuple(list): A tuple containing multiple list, each list contains \
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
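The note above describes how `multi_apply` regroups per-input outputs into per-kind lists. A minimal, self-contained sketch of that behaviour (mirroring the implementation shown in the hunk):

```python
from functools import partial

def multi_apply(func, *args, **kwargs):
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)              # one call per input element
    return tuple(map(list, zip(*map_results)))   # regroup outputs by position

def square_and_cube(x):
    return x ** 2, x ** 3

squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
print(squares, cubes)  # [1, 4, 9] [1, 8, 27]
```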
16 changes: 8 additions & 8 deletions mmdet/datasets/cityscapes.py
@@ -45,8 +45,8 @@ def _parse_ann_info(self, img_info, ann_info):
ann_info (list[dict]): Annotation info of an image.

Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map.
dict: A dict containing the following keys: bboxes, \
bboxes_ignore, labels, masks, seg_map. \
"masks" are already decoded into binary masks.
"""
gt_bboxes = []
@@ -102,8 +102,8 @@ def results2txt(self, results, outfile_prefix):
the txt files will be named "somepath/xxx.txt".

Returns:
list[str: str]: result txt files which contains corresponding
instance segmentation images.
list[str]: Result txt files which contains corresponding \
instance segmentation images.
"""
try:
import cityscapesscripts.helpers.labels as CSLabels
@@ -168,8 +168,8 @@ def format_results(self, results, txtfile_prefix=None):
If not specified, a temp file will be created. Default: None.

Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporary directory created
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving txt/png files when txtfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
@@ -229,7 +229,7 @@ def evaluate(self,
also be computed. Default: 0.5.

Returns:
dict[str, float]: COCO style evaluation metric or cityscapes mAP
dict[str, float]: COCO style evaluation metric or cityscapes mAP \
and AP@50.
"""
eval_results = dict()
@@ -268,7 +268,7 @@ def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
related information during evaluation. Default: None.

Returns:
dict[str: float]: Cityscapes evaluation results, contains 'mAP'
dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
and 'AP@50'.
"""

10 changes: 5 additions & 5 deletions mmdet/datasets/coco.py
@@ -128,8 +128,8 @@ def _parse_ann_info(self, img_info, ann_info):
with_mask (bool): Whether to parse mask annotations.

Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map. "masks" are raw annotations and not
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
@@ -285,7 +285,7 @@ def results2json(self, results, outfile_prefix):
"somepath/xxx.proposal.json".

Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
@@ -344,8 +344,8 @@ def format_results(self, results, jsonfile_prefix=None, **kwargs):
If not specified, a temp file will be created. Default: None.

Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporary directory created
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
6 changes: 3 additions & 3 deletions mmdet/datasets/custom.py
@@ -177,7 +177,7 @@ def __getitem__(self, idx):
idx (int): Index of data.

Returns:
dict: Training/test data (with annotation if `test_mode` is set
dict: Training/test data (with annotation if `test_mode` is set \
True).
"""

@@ -197,7 +197,7 @@ def prepare_train_img(self, idx):
idx (int): Index of data.

Returns:
dict: Training data and annotation after pipeline with new keys
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""

@@ -216,7 +216,7 @@ def prepare_test_img(self, idx):
idx (int): Index of data.

Returns:
dict: Testing data after pipeline with new keys introduced by
dict: Testing data after pipeline with new keys introduced by \
pipeline.
"""
