[DOC] Improve the order of tutorials within a subsection (apache#6880)
merrymercy authored and Trevor Morris committed Dec 4, 2020
1 parent 3288531 commit 6dae2b0
Showing 10 changed files with 86 additions and 29 deletions.
5 changes: 5 additions & 0 deletions docs/README.txt
@@ -51,3 +51,8 @@ You will need a gpu CI environment.
```bash
./tests/scripts/task_python_docs.sh
```

Define the Order of Tutorials
-----------------------------
You can define the order of tutorials with `conf.py::subsection_order` and `conf.py::within_subsection_order`.
By default, the tutorials within one subsection are sorted by filename.
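
For illustration, a minimal sketch (not part of this commit) of how the two knobs relate; the names are taken from the `docs/conf.py` diff below:

```python
# Minimal sketch, assuming sphinx-gallery is installed.
# `subsection_order` orders the subsections themselves;
# `within_subsection_order` (via a callable) orders files inside each one.
from sphinx_gallery.sorting import ExplicitOrder

subsection_order = ExplicitOrder(
    ["../tutorials/get_started", "../tutorials/frontend"]
)

# per-subsection file order; unlisted files fall back to filename order
# (see the WithinSubsectionOrder class in the conf.py diff below)
within_subsection_order = {
    "get_started": ["relay_quick_start.py", "tensor_expr_get_started.py"],
}
```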
69 changes: 69 additions & 0 deletions docs/conf.py
@@ -204,6 +204,74 @@
]
)

# Explicitly define the order of tutorials within a subsection.
# The listed files are sorted according to this list.
# Unlisted files are sorted by filename and always appear after the listed files.
within_subsection_order = {
"get_started": [
"relay_quick_start.py",
"tensor_expr_get_started.py",
"tvmc_command_line_driver.py",
"cross_compilation_and_rpc.py",
],
"frontend": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
],
"language": [
"schedule_primitives.py",
"reduciton.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
"opt_matmul_auto_tensorcore.py",
],
"autotvm": [
"tune_simple_template.py",
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"auto_scheduler": ["tune_matmul_x86.py", "tune_conv2d_layer_cuda.py"],
}


class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]

def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
return "\0%010d" % index

# Otherwise, sort by filename
return filename


sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
@@ -213,6 +281,7 @@
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
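
As a side note, a small self-contained demonstration (not part of the commit) of why the `"\0%010d"` key above puts listed files first: `"\0"` sorts before any printable character, so every generated key compares less than any plain filename.

```python
# Demo of the sort-key trick used by WithinSubsectionOrder above.
order = ["relay_quick_start.py", "tensor_expr_get_started.py"]

def sort_key(filename):
    if filename in order:
        # "\0" prefix sorts before any printable character; the
        # zero-padded index preserves the order given in the list
        return "\0%010d" % order.index(filename)
    return filename  # unlisted files keep plain filename order

files = [
    "cross_compilation_and_rpc.py",
    "tensor_expr_get_started.py",
    "relay_quick_start.py",
]
print(sorted(files, key=sort_key))
# ['relay_quick_start.py', 'tensor_expr_get_started.py', 'cross_compilation_and_rpc.py']
```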
4 changes: 1 addition & 3 deletions tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
@@ -22,8 +22,7 @@
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Different from the existing :ref:`autotvm <tutorials-autotvm-sec>` which relies on
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any templates.
Users only need to write the computation declaration without any schedule commands or templates.
The auto-scheduler can automatically generate a large search space and
@@ -182,7 +181,6 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# and resume the status of search policy and cost model with the log file.
# In the example below we resume the status and do 5 more trials.


cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
search_policy = auto_scheduler.SketchPolicy(
2 changes: 1 addition & 1 deletion tutorials/auto_scheduler/tune_matmul_x86.py
@@ -20,7 +20,7 @@
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Different from the existing :ref:`autotvm <tutorials-autotvm-sec>` which relies on
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any templates.
Users only need to write the computation declaration without any schedule commands or templates.
The auto-scheduler can automatically generate a large search space and
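
To make the docstring's contrast concrete, a minimal sketch of such a computation declaration (modeled on this tutorial's matmul example; treat it as illustrative, not the tutorial's exact code):

```python
import tvm
from tvm import te, auto_scheduler

# Only the computation is declared; no schedule commands or templates.
@auto_scheduler.register_workload
def matmul(N, M, K):
    A = te.placeholder((N, K), name="A")
    B = te.placeholder((K, M), name="B")
    k = te.reduce_axis((0, K), name="k")
    C = te.compute(
        (N, M),
        lambda i, j: te.sum(A[i, k] * B[k, j], axis=k),
        name="C",
    )
    return [A, B, C]
```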
3 changes: 1 addition & 2 deletions tutorials/autotvm/tune_conv2d_cuda.py
@@ -53,8 +53,7 @@
import numpy as np

import tvm
from tvm import te
from tvm import topi
from tvm import te, topi, testing
from tvm.topi.testing import conv2d_nchw_python

from tvm import autotvm
6 changes: 2 additions & 4 deletions tutorials/autotvm/tune_relay_arm.py
@@ -66,9 +66,7 @@

import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
@@ -104,7 +102,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (1, 3, 299, 299)
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
12 changes: 2 additions & 10 deletions tutorials/autotvm/tune_relay_cuda.py
@@ -64,12 +64,9 @@
import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_runtime as runtime

#################################################################
@@ -102,7 +99,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (1, 3, 299, 299)
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
@@ -239,11 +236,6 @@ def tune_and_evaluate(tuning_opt):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod, target=target, params=params)

# export library
tmp = tempdir()
filename = "net.tar"
lib.export_library(tmp.relpath(filename))

# load parameters
ctx = tvm.context(str(target), 0)
module = runtime.GraphModule(lib["default"](ctx))
6 changes: 2 additions & 4 deletions tutorials/autotvm/tune_relay_mobile_gpu.py
@@ -65,9 +65,7 @@
import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
@@ -103,7 +101,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (1, 3, 299, 299)
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
6 changes: 2 additions & 4 deletions tutorials/autotvm/tune_relay_x86.py
@@ -32,9 +32,7 @@
import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm import relay, autotvm
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
@@ -73,7 +71,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (1, 3, 299, 299)
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
2 changes: 1 addition & 1 deletion tutorials/autotvm/tune_simple_template.py
@@ -59,7 +59,7 @@

import numpy as np
import tvm
from tvm import te
from tvm import te, testing

# the module is called `autotvm`
from tvm import autotvm
