[AutoScheduler] Re-organize logs files for tutorials (apache#6768)
* reorganize logs files

* fix lint
merrymercy authored and zhiics committed Oct 28, 2020
1 parent 00e4ccb commit d23c72e
Showing 5 changed files with 20 additions and 24 deletions.
5 changes: 2 additions & 3 deletions tests/scripts/task_python_docs.sh
@@ -39,9 +39,8 @@ rm -rf docs/gen_modules
 rm -rf docs/doxygen
 
 # prepare auto scheduler tutorials
-rm -rf tutorials/auto_scheduler/*logs
-mkdir tutorials/auto_scheduler/logs
-cp -f tutorials/auto_scheduler/{matmul,conv2d}.json tutorials/auto_scheduler/logs
+rm -rf tutorials/auto_scheduler/*.json
+cp -f tutorials/auto_scheduler/ci_logs/{matmul,conv2d}.json tutorials/auto_scheduler
 
 # remove stale tutorials and always build from scratch.
 rm -rf docs/tutorials
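In effect, the docs build now deletes any records the tutorials generated and seeds the tutorial directory with the checked-in ci_logs records, so the bare filenames the tutorials use below (conv2d.json, matmul.json) resolve to pre-tuned logs. A minimal Python re-statement of the two new shell lines, purely for illustration (paths taken from the script above):

import os
import shutil

# Clear generated records, then seed the tutorial directory with the
# checked-in ci_logs records, mirroring the two shell lines above.
tutorial_dir = "tutorials/auto_scheduler"
for name in ("matmul.json", "conv2d.json"):
    generated = os.path.join(tutorial_dir, name)
    if os.path.exists(generated):
        os.remove(generated)
    shutil.copy(os.path.join(tutorial_dir, "ci_logs", name), tutorial_dir)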
1 change: 1 addition & 0 deletions tutorials/auto_scheduler/ci_logs/conv2d.json

@@ -1 +1,2 @@
+# Keep a valid schedule for demonstration. This is used to prevent flaky errors in CI.
 {"i": [["[\"conv2d_layer\", 1, 7, 7, 512, 512, 3, 3, [1, 1], [1, 1]]", "cuda -keys=cuda,gpu -max_num_threads=1024 -thread_warp_size=32"], [[], [["CI", 5], ["SP", 3, 0, 1, [1, 1, 1, 1], 1], ["SP", 3, 5, 512, [1, 64, 2, 1], 1], ["SP", 3, 10, 7, [1, 1, 1, 1], 1], ["SP", 3, 15, 7, [1, 1, 7, 1], 1], ["SP", 3, 20, 512, [4, 2], 1], ["SP", 3, 23, 3, [1, 1], 1], ["SP", 3, 26, 3, [3, 1], 1], ["RE", 3, [0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 20, 23, 26, 21, 24, 27, 3, 8, 13, 18, 22, 25, 28, 4, 9, 14, 19]], ["FSP", 6, 0, 1, 3], ["FSP", 6, 4, 2, 3], ["FSP", 6, 8, 3, 3], ["FSP", 6, 12, 4, 3], ["RE", 6, [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]], ["CA", 3, 6, 11], ["CHR", 2, "shared", [3]], ["CA", 3, 4, 14], ["CHR", 1, "shared", [4]], ["CA", 2, 5, 14], ["CI", 1], ["FU", 8, [0, 1, 2, 3]], ["AN", 8, 0, 5], ["FU", 8, [1, 2, 3, 4]], ["AN", 8, 1, 4], ["FU", 8, [2, 3, 4, 5]], ["AN", 8, 2, 6], ["FU", 4, [0, 1, 2, 3]], ["SP", 4, 0, 48, [1], 1], ["AN", 4, 1, 2], ["FFSP", 4, 0, [4, 3, 2, 1], 1, 1], ["AN", 4, 1, 6], ["FU", 2, [0, 1, 2, 3]], ["SP", 2, 0, 504, [4], 1], ["AN", 2, 1, 2], ["FFSP", 2, 0, [4, 3, 2, 1], 1, 1], ["AN", 2, 1, 6], ["PR", 5, 0, "auto_unroll_max_step$512"]]]], "r": [[0.000429498], 0, 1.59126, 1603259147], "v": "v0.2"}
2 changes: 1 addition & 1 deletion tutorials/auto_scheduler/ci_logs/matmul.json

@@ -1,2 +1,2 @@
-# Keep a valid schedule for demonstraction
+# Keep a valid schedule for demonstration. This is used to prevent flaky errors in CI.
 {"i": [["[\"matmul_add\", 128, 128, 128, \"float32\"]", "llvm -keys=cpu"], [[], [["SP", 2, 0, 128, [4, 2, 4], 1], ["SP", 2, 4, 128, [1, 32, 2], 1], ["SP", 2, 8, 128, [2], 1], ["RE", 2, [0, 4, 1, 5, 8, 2, 6, 9, 3, 7]], ["FSP", 4, 0, 0, 1], ["FSP", 4, 2, 1, 1], ["RE", 4, [0, 2, 1, 3]], ["CA", 2, 4, 1], ["FU", 4, [0, 1]], ["AN", 4, 0, 3], ["PR", 2, 0, "auto_unroll_max_step$0"], ["AN", 2, 9, 2]]]], "r": [[5.80388e-05], 0, 0.299169, 1603402396], "v": "v0.2"}
15 changes: 6 additions & 9 deletions tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
@@ -90,15 +90,12 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # * see :any:`auto_scheduler.TuningOptions`,
 #   :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
 
-if not os.path.exists("./logs"):
-    os.mkdir("./logs")
-
-logfile = os.path.join("./logs", "conv2d.json")
+log_file = "conv2d.json"
 measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
 tune_option = auto_scheduler.TuningOptions(
     num_measure_trials=10,
     runner=measure_ctx.runner,
-    measure_callbacks=[auto_scheduler.RecordToFile(logfile)],
+    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
 )
 
 ######################################################################
@@ -163,7 +160,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measurement record for the best schedule
-inp, res = auto_scheduler.load_best(logfile, task.workload_key)
+inp, res = auto_scheduler.load_best(log_file, task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
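The surrounding tutorial lines are elided from this hunk; presumably the loaded record is turned back into a schedule roughly as sketched below. apply_steps_from_state and print_python_code_from_state are assumed here to be the ComputeDAG methods backing the comments above; treat the exact names as assumptions, not as part of this diff.

# Sketch (assumed API): rebuild the schedule recorded as the best
# measurement, then print its equivalent python schedule API.
inp, res = auto_scheduler.load_best(log_file, task.workload_key)
sch, args = task.compute_dag.apply_steps_from_state(inp.state)
print(task.compute_dag.print_python_code_from_state(inp.state))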
@@ -183,15 +180,15 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 
 
 cost_model = auto_scheduler.XGBModel()
-cost_model.update_from_file(logfile)
+cost_model.update_from_file(log_file)
 search_policy = auto_scheduler.SketchPolicy(
-    task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile)]
+    task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
 )
 measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
 tune_option = auto_scheduler.TuningOptions(
     num_measure_trials=5,
     runner=measure_ctx.runner,
-    measure_callbacks=[auto_scheduler.RecordToFile(logfile)],
+    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
 )
 sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
 
21 changes: 10 additions & 11 deletions tutorials/auto_scheduler/tune_matmul_x86.py
@@ -82,12 +82,9 @@ def matmul_add(N, L, M, dtype):
 # and do more analyses later.
 # * see :any:`auto_scheduler.TuningOptions` for more parameters
 
-if not os.path.exists("./logs"):
-    os.mkdir("./logs")
-
-logfile = os.path.join("./logs", "matmul.json")
+log_file = "matmul.json"
 tune_option = auto_scheduler.TuningOptions(
-    num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(logfile)]
+    num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
 )
 
 ######################################################################
@@ -147,7 +144,7 @@ def matmul_add(N, L, M, dtype):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measurement record for the best schedule
-inp, res = auto_scheduler.load_best(logfile, task.workload_key)
+inp, res = auto_scheduler.load_best(log_file, task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
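For the matmul case, building the binary and checking it as the comment describes would look roughly like the sketch below. tvm.build, the (A, B, C, out) argument order, and the tolerance are assumptions about the elided tutorial code, not part of this diff.

import numpy as np
import tvm

# Sketch (assumed API): build the best schedule found so far, run it once,
# and compare against a NumPy reference for matmul + add.
func = tvm.build(sch, args, target="llvm")
a = tvm.nd.array(np.random.uniform(size=(128, 128)).astype("float32"))
b = tvm.nd.array(np.random.uniform(size=(128, 128)).astype("float32"))
c = tvm.nd.array(np.random.uniform(size=(128, 128)).astype("float32"))
out = tvm.nd.empty((128, 128))
func(a, b, c, out)
np.testing.assert_allclose(
    out.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + c.asnumpy(), rtol=1e-3
)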
@@ -166,19 +163,21 @@ def matmul_add(N, L, M, dtype):
 # In the example below we resume the previous status and do 5 more trials.
 
 
-def resume_search(task, logfile_name):
+def resume_search(task, log_file_name):
     cost_model = auto_scheduler.XGBModel()
-    cost_model.update_from_file(logfile_name)
+    cost_model.update_from_file(log_file_name)
     search_policy = auto_scheduler.SketchPolicy(
-        task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile_name)]
+        task,
+        cost_model,
+        init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file_name)],
     )
     tune_option = auto_scheduler.TuningOptions(
-        num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(logfile_name)]
+        num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file_name)]
     )
     sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
 
 
-# resume_search(task, logfile)
+# resume_search(task, log_file)
 
 ######################################################################
 # .. note::
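Because RecordToFile appends to the existing file, resuming simply extends the same log and the cost model restarts from all accumulated records. A usage sketch (the call is left commented out in the tutorial above, presumably to keep CI runtime down):

# Run 5 more trials on top of the 10 recorded earlier; matmul.json keeps
# accumulating records across resumed runs.
resume_search(task, "matmul.json")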
