From d23c72ed242a98a081754de4311f4a59bd64b8b4 Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Tue, 27 Oct 2020 07:05:10 -0700
Subject: [PATCH] [AutoScheduler] Re-organize log files for tutorials (#6768)

* reorganize log files

* fix lint
---
 tests/scripts/task_python_docs.sh            |  5 ++---
 .../auto_scheduler/{ => ci_logs}/conv2d.json |  1 +
 .../auto_scheduler/{ => ci_logs}/matmul.json |  2 +-
 .../auto_scheduler/tune_conv2d_layer_cuda.py | 15 ++++++---------
 tutorials/auto_scheduler/tune_matmul_x86.py  | 21 ++++++++++-----------
 5 files changed, 20 insertions(+), 24 deletions(-)
 rename tutorials/auto_scheduler/{ => ci_logs}/conv2d.json (93%)
 rename tutorials/auto_scheduler/{ => ci_logs}/matmul.json (83%)

diff --git a/tests/scripts/task_python_docs.sh b/tests/scripts/task_python_docs.sh
index cbaffa2b37e4..3d229651cb4f 100755
--- a/tests/scripts/task_python_docs.sh
+++ b/tests/scripts/task_python_docs.sh
@@ -39,9 +39,8 @@ rm -rf docs/gen_modules
 rm -rf docs/doxygen
 
 # prepare auto scheduler tutorials
-rm -rf tutorials/auto_scheduler/*logs
-mkdir tutorials/auto_scheduler/logs
-cp -f tutorials/auto_scheduler/{matmul,conv2d}.json tutorials/auto_scheduler/logs
+rm -rf tutorials/auto_scheduler/*.json
+cp -f tutorials/auto_scheduler/ci_logs/{matmul,conv2d}.json tutorials/auto_scheduler
 
 # remove stale tutorials and always build from scratch.
 rm -rf docs/tutorials
diff --git a/tutorials/auto_scheduler/conv2d.json b/tutorials/auto_scheduler/ci_logs/conv2d.json
similarity index 93%
rename from tutorials/auto_scheduler/conv2d.json
rename to tutorials/auto_scheduler/ci_logs/conv2d.json
index 10f63d0d4c8a..c748920d14db 100644
--- a/tutorials/auto_scheduler/conv2d.json
+++ b/tutorials/auto_scheduler/ci_logs/conv2d.json
@@ -1 +1,2 @@
+# Keep a valid schedule for demonstration. This is used to prevent flaky errors in CI.
 {"i": [["[\"conv2d_layer\", 1, 7, 7, 512, 512, 3, 3, [1, 1], [1, 1]]", "cuda -keys=cuda,gpu -max_num_threads=1024 -thread_warp_size=32"], [[], [["CI", 5], ["SP", 3, 0, 1, [1, 1, 1, 1], 1], ["SP", 3, 5, 512, [1, 64, 2, 1], 1], ["SP", 3, 10, 7, [1, 1, 1, 1], 1], ["SP", 3, 15, 7, [1, 1, 7, 1], 1], ["SP", 3, 20, 512, [4, 2], 1], ["SP", 3, 23, 3, [1, 1], 1], ["SP", 3, 26, 3, [3, 1], 1], ["RE", 3, [0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 20, 23, 26, 21, 24, 27, 3, 8, 13, 18, 22, 25, 28, 4, 9, 14, 19]], ["FSP", 6, 0, 1, 3], ["FSP", 6, 4, 2, 3], ["FSP", 6, 8, 3, 3], ["FSP", 6, 12, 4, 3], ["RE", 6, [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]], ["CA", 3, 6, 11], ["CHR", 2, "shared", [3]], ["CA", 3, 4, 14], ["CHR", 1, "shared", [4]], ["CA", 2, 5, 14], ["CI", 1], ["FU", 8, [0, 1, 2, 3]], ["AN", 8, 0, 5], ["FU", 8, [1, 2, 3, 4]], ["AN", 8, 1, 4], ["FU", 8, [2, 3, 4, 5]], ["AN", 8, 2, 6], ["FU", 4, [0, 1, 2, 3]], ["SP", 4, 0, 48, [1], 1], ["AN", 4, 1, 2], ["FFSP", 4, 0, [4, 3, 2, 1], 1, 1], ["AN", 4, 1, 6], ["FU", 2, [0, 1, 2, 3]], ["SP", 2, 0, 504, [4], 1], ["AN", 2, 1, 2], ["FFSP", 2, 0, [4, 3, 2, 1], 1, 1], ["AN", 2, 1, 6], ["PR", 5, 0, "auto_unroll_max_step$512"]]]], "r": [[0.000429498], 0, 1.59126, 1603259147], "v": "v0.2"}
diff --git a/tutorials/auto_scheduler/matmul.json b/tutorials/auto_scheduler/ci_logs/matmul.json
similarity index 83%
rename from tutorials/auto_scheduler/matmul.json
rename to tutorials/auto_scheduler/ci_logs/matmul.json
index 7f537641281a..827cfc9a6dbb 100644
--- a/tutorials/auto_scheduler/matmul.json
+++ b/tutorials/auto_scheduler/ci_logs/matmul.json
@@ -1,2 +1,2 @@
-# Keep a valid schedule for demonstraction
+# Keep a valid schedule for demonstration. This is used to prevent flaky errors in CI.
 {"i": [["[\"matmul_add\", 128, 128, 128, \"float32\"]", "llvm -keys=cpu"], [[], [["SP", 2, 0, 128, [4, 2, 4], 1], ["SP", 2, 4, 128, [1, 32, 2], 1], ["SP", 2, 8, 128, [2], 1], ["RE", 2, [0, 4, 1, 5, 8, 2, 6, 9, 3, 7]], ["FSP", 4, 0, 0, 1], ["FSP", 4, 2, 1, 1], ["RE", 4, [0, 2, 1, 3]], ["CA", 2, 4, 1], ["FU", 4, [0, 1]], ["AN", 4, 0, 3], ["PR", 2, 0, "auto_unroll_max_step$0"], ["AN", 2, 9, 2]]]], "r": [[5.80388e-05], 0, 0.299169, 1603402396], "v": "v0.2"}
diff --git a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
index 68fa5d597f66..10a2d1b44144 100644
--- a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
+++ b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
@@ -90,15 +90,12 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # * see :any:`auto_scheduler.TuningOptions`,
 #   :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
 
-if not os.path.exists("./logs"):
-    os.mkdir("./logs")
-
-logfile = os.path.join("./logs", "conv2d.json")
+log_file = "conv2d.json"
 measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
 tune_option = auto_scheduler.TuningOptions(
     num_measure_trials=10,
     runner=measure_ctx.runner,
-    measure_callbacks=[auto_scheduler.RecordToFile(logfile)],
+    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
 )
 
 ######################################################################
@@ -163,7 +160,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measurement record for the best schedule
-inp, res = auto_scheduler.load_best(logfile, task.workload_key)
+inp, res = auto_scheduler.load_best(log_file, task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
@@ -183,15 +180,15 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 
 cost_model = auto_scheduler.XGBModel()
-cost_model.update_from_file(logfile)
+cost_model.update_from_file(log_file)
 search_policy = auto_scheduler.SketchPolicy(
-    task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile)]
+    task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
 )
 measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
 tune_option = auto_scheduler.TuningOptions(
     num_measure_trials=5,
     runner=measure_ctx.runner,
-    measure_callbacks=[auto_scheduler.RecordToFile(logfile)],
+    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
 )
 sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
diff --git a/tutorials/auto_scheduler/tune_matmul_x86.py b/tutorials/auto_scheduler/tune_matmul_x86.py
index a2331fcc9835..81f2e71ff8f7 100644
--- a/tutorials/auto_scheduler/tune_matmul_x86.py
+++ b/tutorials/auto_scheduler/tune_matmul_x86.py
@@ -82,12 +82,9 @@ def matmul_add(N, L, M, dtype):
 # and do more analyses later.
 # * see :any:`auto_scheduler.TuningOptions` for more parameters
 
-if not os.path.exists("./logs"):
-    os.mkdir("./logs")
-
-logfile = os.path.join("./logs", "matmul.json")
+log_file = "matmul.json"
 tune_option = auto_scheduler.TuningOptions(
-    num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(logfile)]
+    num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
 )
 
 ######################################################################
@@ -147,7 +144,7 @@ def matmul_add(N, L, M, dtype):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measurement record for the best schedule
-inp, res = auto_scheduler.load_best(logfile, task.workload_key)
+inp, res = auto_scheduler.load_best(log_file, task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
@@ -166,19 +163,21 @@ def matmul_add(N, L, M, dtype):
 # In the example below we resume the search and do 5 more trials.
 
 
-def resume_search(task, logfile_name):
+def resume_search(task, log_file_name):
     cost_model = auto_scheduler.XGBModel()
-    cost_model.update_from_file(logfile_name)
+    cost_model.update_from_file(log_file_name)
     search_policy = auto_scheduler.SketchPolicy(
-        task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(logfile_name)]
+        task,
+        cost_model,
+        init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file_name)],
     )
     tune_option = auto_scheduler.TuningOptions(
-        num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(logfile_name)]
+        num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file_name)]
     )
     sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
 
 
-# resume_search(task, logfile)
+# resume_search(task, log_file)
 
 ######################################################################
 # .. note::
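
---

For reviewers who want the end-to-end picture: after this patch the tutorials tune against a flat log file name ("matmul.json" / "conv2d.json") in the working directory, and CI seeds that file from ci_logs/ so load_best always finds a valid record. Below is a minimal sketch stringing together the calls that appear in the diff (TuningOptions, RecordToFile, auto_schedule, load_best); the register_workload/create_task setup and the matmul_add body follow the v0.7-era tutorial API and are not part of this diff, so treat them as assumptions.

    import tvm
    from tvm import te, auto_scheduler


    @auto_scheduler.register_workload
    def matmul_add(N, L, M, dtype):
        # Compute definition mirroring the tutorial: out = A @ B + C.
        A = te.placeholder((N, L), name="A", dtype=dtype)
        B = te.placeholder((L, M), name="B", dtype=dtype)
        C = te.placeholder((N, M), name="C", dtype=dtype)
        k = te.reduce_axis((0, L), name="k")
        matmul = te.compute(
            (N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="matmul"
        )
        out = te.compute((N, M), lambda i, j: matmul[i, j] + C[i, j], name="out")
        return [A, B, C, out]


    target = tvm.target.Target("llvm")
    task = auto_scheduler.create_task(matmul_add, (128, 128, 128, "float32"), target)

    # After this patch the log is a plain file next to the tutorial; in CI it is
    # pre-populated from ci_logs/matmul.json by task_python_docs.sh (see above).
    log_file = "matmul.json"
    tune_option = auto_scheduler.TuningOptions(
        num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
    )
    sch, args = auto_scheduler.auto_schedule(task, tuning_options=tune_option)
    func = tvm.build(sch, args, target)

    # Replay: load the best record from the log later without re-tuning.
    inp, res = auto_scheduler.load_best(log_file, task.workload_key)

Keeping the name flat is what lets the CI script clean generated logs with `rm -rf tutorials/auto_scheduler/*.json` without touching the checked-in records under ci_logs/.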