
Commit

[Fix] fix hand readthedocs page (open-mmlab#1416)
* fix hand readthedocs page

* fix docs and collect.py

* fix doc index
ly015 authored and shuheilocale committed May 5, 2023
1 parent 561d268 commit 3f4df84
Showing 7 changed files with 318 additions and 6 deletions.
@@ -53,8 +53,8 @@ Results on NVGesture test set

| Arch | Input Size | fps | bbox | AP_rgb | AP_depth | ckpt | log |
| :------------------------------------------------------ | :--------: | :-: | :-------: | :----: | :------: | :-----------------------------------------------------: | :----------------------------------------------------: |
| [I3D+MTUT](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_bbox_112x112_fps15.py)$^\*$ | 112x112 | 15 | $\\surd$ | 0.725 | 0.730 | [ckpt](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_112x112_fps15-363b5956_20220530.pth) | [log](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_112x112_fps15-20220530.log.json) |
| [I3D+MTUT](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_bbox_112x112_fps15.py)<sup>\*</sup> | 112x112 | 15 | $\\surd$ | 0.725 | 0.730 | [ckpt](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_112x112_fps15-363b5956_20220530.pth) | [log](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_112x112_fps15-20220530.log.json) |
| [I3D+MTUT](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_bbox_224x224_fps30.py) | 224x224 | 30 | $\\surd$ | 0.782 | 0.811 | [ckpt](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_224x224_fps30-98a8f288_20220530.pthh) | [log](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_bbox_224x224_fps30-20220530.log.json) |
| [I3D+MTUT](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_224x224_fps30.py) | 224x224 | 30 | $\\times$ | 0.739 | 0.809 | [ckpt](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_224x224_fps30-b7abf574_20220530.pth) | [log](https://download.openmmlab.com/mmpose/gesture/mtut/i3d_nvgesture_224x224_fps30-20220530.log.json) |

$^\*$: MTUT supports multi-modal training and uni-modal testing. A model trained with this config can be used to recognize gestures in RGB videos with the [inference config](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_bbox_112x112_fps15_rgb.py).
<sup>\*</sup>: MTUT supports multi-modal training and uni-modal testing. A model trained with this config can be used to recognize gestures in RGB videos with the [inference config](/configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/i3d_nvgesture_bbox_112x112_fps15_rgb.py).
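
The footnote above points to a separate RGB-only inference config for the multi-modally trained checkpoint. Below is a minimal sketch of fetching that config and checkpoint in an MMPose 0.x checkout with mmcv and torch installed; the paths and URLs come from the table above, while building and running the gesture recognizer itself would follow the usual MMPose demo flow and is not shown here.

```python
# Minimal sketch (not the official demo script): load the RGB-only
# inference config and the released MTUT checkpoint from the table above.
import torch
from mmcv import Config

cfg = Config.fromfile(
    'configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/'
    'i3d_nvgesture_bbox_112x112_fps15_rgb.py')

ckpt = torch.hub.load_state_dict_from_url(
    'https://download.openmmlab.com/mmpose/gesture/mtut/'
    'i3d_nvgesture_bbox_112x112_fps15-363b5956_20220530.pth',
    map_location='cpu')

print(cfg.filename)
print(list(ckpt.keys()))  # typically contains 'meta' and 'state_dict'
```
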
130 changes: 130 additions & 0 deletions docs/en/collect.py
@@ -0,0 +1,130 @@
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import re
from glob import glob

from titlecase import titlecase

os.makedirs('topics', exist_ok=True)
os.makedirs('papers', exist_ok=True)


def _parse_task(task):
"""Parse task name.
Data modality is represented by a string of 4 or 5 parts like:
- 2d_kpt_sview_rgb_img
- gesture_sview_rgbd_vid
"""

parts = task.split('_')
if len(parts) == 5:
pass
elif len(parts) == 4:
# The first part "spatial dimension" is optional
parts = [''] + parts
else:
raise ValueError('Invalid modality')

return parts


# Step 1: get subtopics: a mix of topic and task
minisections = [
x.split(osp.sep)[-2:] for x in glob('../../configs/*/*')
if '_base_' not in x
]
alltopics = sorted(list(set(x[0] for x in minisections)))
subtopics = []
for topic in alltopics:
tasks = [_parse_task(x[1]) for x in minisections if x[0] == topic]
valid_ids = []
for i in range(len(tasks[0])):
if len(set(x[i] for x in tasks)) > 1:
valid_ids.append(i)
if len(valid_ids) > 0:
for task in tasks:
appendix = ','.join(
[task[i].title() for i in valid_ids if task[i]])
subtopic = [
f'{titlecase(topic)}({appendix})',
topic,
'_'.join(t for t in task if t),
]
subtopics.append(subtopic)
else:
subtopics.append([titlecase(topic), topic, '_'.join(tasks[0])])

contents = {}
for subtopic, topic, task in sorted(subtopics):
# Step 2: get all datasets
datasets = sorted(
list(
set(
x.split(osp.sep)[-2]
for x in glob(f'../../configs/{topic}/{task}/*/*/'))))
contents[subtopic] = {d: {} for d in datasets}
for dataset in datasets:
# Step 3: get all settings: algorithm + backbone + trick
for file in glob(f'../../configs/{topic}/{task}/*/{dataset}/*.md'):
keywords = (file.split(osp.sep)[-3],
*file.split(osp.sep)[-1].split('_')[:-1])
with open(file, 'r', encoding='utf-8') as f:
contents[subtopic][dataset][keywords] = f.read()

# Step 4: write files by topic
for subtopic, datasets in contents.items():
lines = [f'# {subtopic}', '']
for dataset, keywords in datasets.items():
if len(keywords) == 0:
continue
lines += [
'<hr/>', '<br/><br/>', '', f'## {titlecase(dataset)} Dataset', ''
]
for keyword, info in keywords.items():
keyword_strs = [titlecase(x.replace('_', ' ')) for x in keyword]
lines += [
'<br/>', '',
(f'### {" + ".join(keyword_strs)}'
f' on {titlecase(dataset)}'), '', info, ''
]

with open(f'topics/{subtopic.lower()}.md', 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))

# Step 5: write files by paper
allfiles = [x.split(osp.sep)[-2:] for x in glob('../en/papers/*/*.md')]
sections = sorted(list(set(x[0] for x in allfiles)))
for section in sections:
lines = [f'# {titlecase(section)}', '']
files = [f for s, f in allfiles if s == section]
for file in files:
with open(
f'../en/papers/{section}/{file}', 'r', encoding='utf-8') as f:
keyline = [
line for line in f.readlines() if line.startswith('<summary')
][0]
papername = re.sub(r'\<.*?\>', '', keyline).strip()
paperlines = []
for subtopic, datasets in contents.items():
for dataset, keywords in datasets.items():
keywords = {k: v for k, v in keywords.items() if keyline in v}
if len(keywords) == 0:
continue
for keyword, info in keywords.items():
keyword_strs = [
titlecase(x.replace('_', ' ')) for x in keyword
]
paperlines += [
'<br/>', '',
(f'### {" + ".join(keyword_strs)}'
f' on {titlecase(dataset)}'), '', info, ''
]
if len(paperlines) > 0:
lines += ['<hr/>', '<br/><br/>', '', f'## {papername}', '']
lines += paperlines

with open(f'papers/{section}.md', 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
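
For reference, here is a standalone rerun of the `_parse_task` helper defined above, applied to the two modality strings named in its docstring. Combined with the `titlecase`/`.title()` formatting in Step 1, this parsing is what yields subtopic pages such as `topics/hand(2d,kpt,rgb,img).md` that appear in the index.rst changes below.

```python
# Standalone copy of collect.py's _parse_task, for illustration only.
def _parse_task(task):
    parts = task.split('_')
    if len(parts) == 5:
        pass
    elif len(parts) == 4:
        # the leading "spatial dimension" part (2d/3d) is optional
        parts = [''] + parts
    else:
        raise ValueError('Invalid modality')
    return parts


print(_parse_task('2d_kpt_sview_rgb_img'))
# -> ['2d', 'kpt', 'sview', 'rgb', 'img']
print(_parse_task('gesture_sview_rgbd_vid'))
# -> ['', 'gesture', 'sview', 'rgbd', 'vid']
```
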
17 changes: 17 additions & 0 deletions docs/en/index.rst
@@ -48,12 +48,29 @@ You can change the documentation language at the lower-left corner of the page.
:maxdepth: 2
:caption: Model Zoo

<<<<<<< HEAD
model_zoo.txt
model_zoo/body_2d_keypoint.md
model_zoo/face_2d_keypoint.md
model_zoo/hand_2d_keypoint.md
model_zoo/wholebody_2d_keypoint.md
model_zoo/animal_2d_keypoint.md
=======
modelzoo.md
topics/animal.md
topics/body(2d,kpt,sview,img).md
topics/body(2d,kpt,sview,vid).md
topics/body(3d,kpt,sview,img).md
topics/body(3d,kpt,sview,vid).md
topics/body(3d,kpt,mview,img).md
topics/body(3d,mesh,sview,img).md
topics/face.md
topics/fashion.md
topics/hand(2d,kpt,rgb,img).md
topics/hand(3d,kpt,rgb,img).md
topics/hand(gesture,rgbd,vid).md
topics/wholebody.md
>>>>>>> fef003a6 ([Fix] fix hand readthedocs page (#1416))

.. toctree::
:maxdepth: 2
13 changes: 11 additions & 2 deletions docs/en/stats.py
@@ -21,7 +21,7 @@ def anchor(name):
stats = []

for f in files:
with open(f, 'r') as content_file:
with open(f, 'r', encoding='utf-8') as content_file:
content = content_file.read()

# title
@@ -94,7 +94,11 @@ def anchor(name):
"""

<<<<<<< HEAD
with open('model_zoo.md', 'w') as f:
=======
with open('modelzoo.md', 'w', encoding='utf-8') as f:
>>>>>>> fef003a6 ([Fix] fix hand readthedocs page (#1416))
f.write(modelzoo)

# Count datasets
@@ -105,7 +109,7 @@ def anchor(name):
datastats = []

for f in files:
with open(f, 'r') as content_file:
with open(f, 'r', encoding='utf-8') as content_file:
content = content_file.read()

# title
@@ -172,5 +176,10 @@ def anchor(name):
{datamsglist}
"""

<<<<<<< HEAD
with open('dataset_zoo.md', 'w') as f:
f.write(dataset_zoo)
=======
with open('datasets.md', 'w', encoding='utf-8') as f:
f.write(modelzoo)
>>>>>>> fef003a6 ([Fix] fix hand readthedocs page (#1416))
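
The stats.py hunks above mostly add an explicit `encoding='utf-8'` to each `open()` call. A short sketch of why that matters: without the argument, Python falls back to the platform's preferred locale encoding, which is not UTF-8 on many Windows setups, so reading or writing the docs (the zh_cn pages contain Chinese text) can fail. The `modelzoo.md` file name and the written string below are only illustrative.

```python
# Why stats.py pins encoding='utf-8': the default depends on the platform.
import locale

print(locale.getpreferredencoding(False))  # e.g. 'cp1252' on some Windows hosts

# An explicit encoding keeps non-ASCII doc content portable.
with open('modelzoo.md', 'w', encoding='utf-8') as f:
    f.write('I3D+MTUT 手势识别 (gesture recognition)\n')
```
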
130 changes: 130 additions & 0 deletions docs/zh_cn/collect.py
@@ -0,0 +1,130 @@
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import re
from glob import glob

from titlecase import titlecase

os.makedirs('topics', exist_ok=True)
os.makedirs('papers', exist_ok=True)


def _parse_task(task):
"""Parse task name.
Data modality is represented by a string of 4 or 5 parts like:
- 2d_kpt_sview_rgb_img
- gesture_sview_rgbd_vid
"""

parts = task.split('_')
if len(parts) == 5:
pass
elif len(parts) == 4:
# The first part "spatial dimension" is optional
parts = [''] + parts
else:
raise ValueError('Invalid modality')

return parts


# Step 1: get subtopics: a mix of topic and task
minisections = [
x.split(osp.sep)[-2:] for x in glob('../../configs/*/*')
if '_base_' not in x
]
alltopics = sorted(list(set(x[0] for x in minisections)))
subtopics = []
for topic in alltopics:
tasks = [_parse_task(x[1]) for x in minisections if x[0] == topic]
valid_ids = []
for i in range(len(tasks[0])):
if len(set(x[i] for x in tasks)) > 1:
valid_ids.append(i)
if len(valid_ids) > 0:
for task in tasks:
appendix = ','.join(
[task[i].title() for i in valid_ids if task[i]])
subtopic = [
f'{titlecase(topic)}({appendix})',
topic,
'_'.join(t for t in task if t),
]
subtopics.append(subtopic)
else:
subtopics.append([titlecase(topic), topic, '_'.join(tasks[0])])

contents = {}
for subtopic, topic, task in sorted(subtopics):
# Step 2: get all datasets
datasets = sorted(
list(
set(
x.split(osp.sep)[-2]
for x in glob(f'../../configs/{topic}/{task}/*/*/'))))
contents[subtopic] = {d: {} for d in datasets}
for dataset in datasets:
# Step 3: get all settings: algorithm + backbone + trick
for file in glob(f'../../configs/{topic}/{task}/*/{dataset}/*.md'):
keywords = (file.split(osp.sep)[-3],
*file.split(osp.sep)[-1].split('_')[:-1])
with open(file, 'r', encoding='utf-8') as f:
contents[subtopic][dataset][keywords] = f.read()

# Step 4: write files by topic
for subtopic, datasets in contents.items():
lines = [f'# {subtopic}', '']
for dataset, keywords in datasets.items():
if len(keywords) == 0:
continue
lines += [
'<hr/>', '<br/><br/>', '', f'## {titlecase(dataset)} Dataset', ''
]
for keyword, info in keywords.items():
keyword_strs = [titlecase(x.replace('_', ' ')) for x in keyword]
lines += [
'<br/>', '',
(f'### {" + ".join(keyword_strs)}'
f' on {titlecase(dataset)}'), '', info, ''
]

with open(f'topics/{subtopic.lower()}.md', 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))

# Step 5: write files by paper
allfiles = [x.split(osp.sep)[-2:] for x in glob('../en/papers/*/*.md')]
sections = sorted(list(set(x[0] for x in allfiles)))
for section in sections:
lines = [f'# {titlecase(section)}', '']
files = [f for s, f in allfiles if s == section]
for file in files:
with open(
f'../en/papers/{section}/{file}', 'r', encoding='utf-8') as f:
keyline = [
line for line in f.readlines() if line.startswith('<summary')
][0]
papername = re.sub(r'\<.*?\>', '', keyline).strip()
paperlines = []
for subtopic, datasets in contents.items():
for dataset, keywords in datasets.items():
keywords = {k: v for k, v in keywords.items() if keyline in v}
if len(keywords) == 0:
continue
for keyword, info in keywords.items():
keyword_strs = [
titlecase(x.replace('_', ' ')) for x in keyword
]
paperlines += [
'<br/>', '',
(f'### {" + ".join(keyword_strs)}'
f' on {titlecase(dataset)}'), '', info, ''
]
if len(paperlines) > 0:
lines += ['<hr/>', '<br/><br/>', '', f'## {papername}', '']
lines += paperlines

with open(f'papers/{section}.md', 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
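
One more illustration of the collection logic shared by both collect.py copies: Step 3 derives the "algorithm + backbone" keywords purely from the results-file path, relying on the `configs/<topic>/<task>/<algorithm>/<dataset>/` layout assumed by the glob pattern. The path below is a hypothetical example of that layout; the `('mtut', 'i3d')` output follows directly from the slicing in the script.

```python
# Hypothetical example path following the layout collect.py globs for:
# ../../configs/<topic>/<task>/<algorithm>/<dataset>/<file>.md
import os.path as osp

file = osp.join('..', '..', 'configs', 'hand', 'gesture_sview_rgbd_vid',
                'mtut', 'nvgesture', 'i3d_nvgesture.md')

parts = file.split(osp.sep)
keywords = (parts[-3], *parts[-1].split('_')[:-1])
print(keywords)  # ('mtut', 'i3d')
# After titlecasing, Step 4 renders this as a heading like
# "### Mtut + I3d on Nvgesture".
```
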
17 changes: 17 additions & 0 deletions docs/zh_cn/index.rst
@@ -34,7 +34,24 @@ You can change the documentation language at the lower-left corner of the page.
:maxdepth: 1
:caption: 相关项目

<<<<<<< HEAD
projects/community_projects.md
=======
modelzoo.md
topics/animal.md
topics/body(2d,kpt,sview,img).md
topics/body(2d,kpt,sview,vid).md
topics/body(3d,kpt,sview,img).md
topics/body(3d,kpt,sview,vid).md
topics/body(3d,kpt,mview,img).md
topics/body(3d,mesh,sview,img).md
topics/face.md
topics/fashion.md
topics/hand(2d,kpt,rgb,img).md
topics/hand(3d,kpt,rgb,img).md
topics/hand(gesture,rgbd,vid).md
topics/wholebody.md
>>>>>>> fef003a6 ([Fix] fix hand readthedocs page (#1416))

.. toctree::
:maxdepth: 1