[TVMC] Global pass context for compile and tune
This is a follow-up to conversations in apache#13216. Making the pass
context a global value for both the `compile` and `tune` commands ensures
the pass context is exactly what the user specified, and makes it possible
to test components such as `convert_graph_layout` under a pass context
suited to testing (e.g. one with instruments added). With this change, it
becomes the user's responsibility to ensure the PassContext they select is
suitable for the passes that will be run. By default, `opt_level` remains
3, so existing workflows that do not alter the pass context from the
command line or the TVMC Python API should not be affected.

Change-Id: I7a601daf6fbe664f77bce1b45efeb7ca29f621b3
lhutton1 committed Nov 8, 2022
1 parent 60e2c98 commit 84c6fa3
Showing 8 changed files with 247 additions and 205 deletions.
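To make the intent concrete, here is a minimal sketch (not part of the commit) of the pattern the change adopts, with one user-visible PassContext wrapping the whole flow:

    import tvm

    # Hypothetical user-supplied settings (CLI flags or TVMC Python API arguments).
    opt_level, config, disabled_pass, instruments = 3, {}, None, None

    # A single global context: partitioning, layout conversion and the build all
    # observe the same opt_level, config, disabled passes and instruments.
    with tvm.transform.PassContext(
        opt_level=opt_level,
        config=config,
        disabled_pass=disabled_pass,
        instruments=instruments,
    ):
        pass  # partition_function(...), convert_graph_layout(...), build(...) run here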
python/tvm/driver/tvmc/autotuner.py (197 changes: 101 additions & 96 deletions)
@@ -389,110 +389,115 @@ def tune_model(
     # model is fixed. For now, creating a clone avoids the issue.
     mod = deepcopy(tvmc_model.mod)
     params = tvmc_model.params
-    if tuning_records is None:
-        tuning_records = tvmc_model.default_tuning_records_path()
-
-    for codegen_from_cli in extra_targets:
-        codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
-        partition_function = codegen["pass_pipeline"]
-        mod = partition_function(mod, params, **codegen_from_cli["opts"])
-
-    # min_repeat_ms should be:
-    # a. the value provided by the user, if any, or
-    # b. 0ms in case target is "cpu"; otherwise 1000ms
-    if min_repeat_ms is None:
-        min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
-        logger.info("Default --min-repeat-ms for this target is %s", min_repeat_ms)
-
-    if rpc_key:
-        if hostname is None or port is None:
-            raise TVMCException(
-                "You must provide a hostname and port to connect to a remote RPC device."
-            )
-        if isinstance(port, str):
-            port = int(port)
-
-        logger.info("Tuning will be performed on device %s at %s:%d.", rpc_key, hostname, port)
-
-        runner_ctor = auto_scheduler.RPCRunner if enable_autoscheduler else autotvm.RPCRunner
-        runner = runner_ctor(
-            key=rpc_key,
-            host=hostname,
-            port=port,
-            number=number,
-            repeat=repeat,
-            n_parallel=parallel,
-            timeout=timeout,
-            min_repeat_ms=min_repeat_ms,
-        )
-    else:
-        logger.info("Starting localhost tuning.")
-        runner_ctor = (
-            auto_scheduler.LocalRPCMeasureContext if enable_autoscheduler else autotvm.LocalRunner
-        )
-        local_server = runner_ctor(
-            number=number,
-            repeat=repeat,
-            timeout=timeout,
-            min_repeat_ms=min_repeat_ms,
-        )
-
-        # For autoscheduling on some devices, we need to maintain a LocalRPCMeasureContext object.
-        if enable_autoscheduler:
-            runner = local_server.runner
-        else:
-            runner = local_server
-
-    if enable_autoscheduler:
-
-        tasks, weights = autoscheduler_get_tuning_tasks(
-            mod=mod,
-            params=params,
-            target=target,
-            alter_layout=desired_layout,
-            hardware_params=hardware_params,
-            include_simple_tasks=include_simple_tasks,
-        )
-
-        # Create the autoscheduler tuning options
-        tuning_options = auto_scheduler.TuningOptions(
-            num_measure_trials=trials,
-            measure_callbacks=[auto_scheduler.RecordToFile(tuning_records)],
-            runner=runner,
-            early_stopping=early_stopping,
-        )
-
-        logger.info("Autoscheduling with configuration: %s", tuning_options)
-
-        # Schedule the tasks (i.e., produce a schedule for each task)
-        schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
-    else:
-        tasks = autotvm_get_tuning_tasks(
-            mod=mod,
-            params=params,
-            target=target,
-            alter_layout=desired_layout,
-        )
-
-        # In autotvm, trials is specified per task. We can convert the per-model input
-        # provided to per-task trials by dividing by the number of tasks.
-        trials = int(trials / max(len(tasks), 1))
-        logger.info("Autotuning with %d trials per task.", trials)
-
-        tuning_options = {
-            "tuner": tuner,
-            "trials": trials,
-            "early_stopping": early_stopping,
-            "measure_option": autotvm.measure_option(
-                builder=autotvm.LocalBuilder(build_func="default"), runner=runner
-            ),
-            "tuning_records": prior_records,
-        }
-        logger.info("Autotuning with configuration: %s", tuning_options)
-
-        tune_tasks(tasks, tuning_records, **tuning_options)
-
-    return tuning_records
+    with tvm.transform.PassContext(opt_level=3):
+        if tuning_records is None:
+            tuning_records = tvmc_model.default_tuning_records_path()
+
+        for codegen_from_cli in extra_targets:
+            codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
+            partition_function = codegen["pass_pipeline"]
+            mod = partition_function(mod, params, **codegen_from_cli["opts"])
+
+        # min_repeat_ms should be:
+        # a. the value provided by the user, if any, or
+        # b. 0ms in case target is "cpu"; otherwise 1000ms
+        if min_repeat_ms is None:
+            min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
+            logger.info("Default --min-repeat-ms for this target is %s", min_repeat_ms)
+
+        if rpc_key:
+            if hostname is None or port is None:
+                raise TVMCException(
+                    "You must provide a hostname and port to connect to a remote RPC device."
+                )
+            if isinstance(port, str):
+                port = int(port)
+
+            logger.info("Tuning will be performed on device %s at %s:%d.", rpc_key, hostname, port)
+
+            runner_ctor = auto_scheduler.RPCRunner if enable_autoscheduler else autotvm.RPCRunner
+            runner = runner_ctor(
+                key=rpc_key,
+                host=hostname,
+                port=port,
+                number=number,
+                repeat=repeat,
+                n_parallel=parallel,
+                timeout=timeout,
+                min_repeat_ms=min_repeat_ms,
+            )
+        else:
+            logger.info("Starting localhost tuning.")
+            runner_ctor = (
+                auto_scheduler.LocalRPCMeasureContext
+                if enable_autoscheduler
+                else autotvm.LocalRunner
+            )
+            local_server = runner_ctor(
+                number=number,
+                repeat=repeat,
+                timeout=timeout,
+                min_repeat_ms=min_repeat_ms,
+            )
+
+            # For autoscheduling on some devices, we need to maintain a
+            # LocalRPCMeasureContext object.
+            if enable_autoscheduler:
+                runner = local_server.runner
+            else:
+                runner = local_server
+
+        if enable_autoscheduler:
+
+            tasks, weights = autoscheduler_get_tuning_tasks(
+                mod=mod,
+                params=params,
+                target=target,
+                alter_layout=desired_layout,
+                hardware_params=hardware_params,
+                include_simple_tasks=include_simple_tasks,
+            )
+
+            # Create the autoscheduler tuning options
+            tuning_options = auto_scheduler.TuningOptions(
+                num_measure_trials=trials,
+                measure_callbacks=[auto_scheduler.RecordToFile(tuning_records)],
+                runner=runner,
+                early_stopping=early_stopping,
+            )
+
+            logger.info("Autoscheduling with configuration: %s", tuning_options)
+
+            # Schedule the tasks (i.e., produce a schedule for each task)
+            schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
+        else:
+            tasks = autotvm_get_tuning_tasks(
+                mod=mod,
+                params=params,
+                target=target,
+                alter_layout=desired_layout,
+            )
+
+            # In autotvm, trials is specified per task. We can convert the per-model input
+            # provided to per-task trials by dividing by the number of tasks.
+            trials = int(trials / max(len(tasks), 1))
+            logger.info("Autotuning with %d trials per task.", trials)
+
+            tuning_options = {
+                "tuner": tuner,
+                "trials": trials,
+                "early_stopping": early_stopping,
+                "measure_option": autotvm.measure_option(
+                    builder=autotvm.LocalBuilder(build_func="default"), runner=runner
+                ),
+                "tuning_records": prior_records,
+            }
+            logger.info("Autotuning with configuration: %s", tuning_options)
+
+            tune_tasks(tasks, tuning_records, **tuning_options)
+
+        return tuning_records


 def autotvm_get_tuning_tasks(
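For reference, the path this hunk changes is reached through the TVMC Python API roughly as follows; a sketch, assuming `tvmc.load` and `tvmc.tune` as the public entry points (the model file name is illustrative):

    from tvm.driver import tvmc

    model = tvmc.load("my_model.onnx")  # illustrative input file

    # Task extraction and tuning now run under the single global
    # PassContext(opt_level=3) introduced above, instead of helpers
    # opening their own contexts ad hoc.
    tuning_records = tvmc.tune(model, target="llvm", trials=256)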
python/tvm/driver/tvmc/compiler.py (122 changes: 56 additions & 66 deletions)
@@ -292,39 +292,38 @@ def compile_model(

     config = parse_configs(pass_context_configs)

-    if desired_layout:
-        mod = convert_graph_layout(mod, desired_layout)
-
-    tvm_target, extra_targets = target_from_cli(target, additional_target_options)
-    tvm_target, target_host = Target.canon_target_and_host(tvm_target, target_host)
-
-    for codegen_from_cli in extra_targets:
-        codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
-        partition_function = codegen["pass_pipeline"]
-
-        if codegen["config_key"] is not None:
-            config[codegen["config_key"]] = codegen_from_cli["opts"]
-        with tvm.transform.PassContext(config=config):
-            mod = partition_function(mod, params, mod_name=mod_name, **codegen_from_cli["opts"])
-
-    if tuning_records and os.path.exists(tuning_records):
-        logger.debug("tuning records file provided: %s", tuning_records)
-
-        use_autoscheduler = True
-        try:
-            auto_scheduler.load_records(tuning_records)
-        except tvm._ffi.base.TVMError:
-            use_autoscheduler = False
-
-        if use_autoscheduler:
-            with auto_scheduler.ApplyHistoryBest(tuning_records):
-                config["relay.backend.use_auto_scheduler"] = True
-                with tvm.transform.PassContext(
-                    opt_level=opt_level,
-                    config=config,
-                    disabled_pass=disabled_pass,
-                    instruments=instruments,
-                ):
+    with tvm.transform.PassContext(
+        opt_level=opt_level,
+        config=config,
+        disabled_pass=disabled_pass,
+        instruments=instruments,
+    ):
+        if desired_layout:
+            mod = convert_graph_layout(mod, desired_layout)
+
+        tvm_target, extra_targets = target_from_cli(target, additional_target_options)
+        tvm_target, target_host = Target.canon_target_and_host(tvm_target, target_host)
+
+        for codegen_from_cli in extra_targets:
+            codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
+            partition_function = codegen["pass_pipeline"]
+
+            if codegen["config_key"] is not None:
+                config[codegen["config_key"]] = codegen_from_cli["opts"]
+            mod = partition_function(mod, params, mod_name=mod_name, **codegen_from_cli["opts"])
+
+        if tuning_records and os.path.exists(tuning_records):
+            logger.debug("tuning records file provided: %s", tuning_records)
+
+            use_autoscheduler = True
+            try:
+                auto_scheduler.load_records(tuning_records)
+            except tvm._ffi.base.TVMError:
+                use_autoscheduler = False
+
+            if use_autoscheduler:
+                with auto_scheduler.ApplyHistoryBest(tuning_records):
+                    config["relay.backend.use_auto_scheduler"] = True
                     logger.debug("building relay graph with autoscheduler")
                     graph_module = build(
                         mod,
@@ -336,14 +335,8 @@
                         mod_name=mod_name,
                         workspace_pools=workspace_pools,
                     )
-        else:
-            with autotvm.apply_history_best(tuning_records):
-                with tvm.transform.PassContext(
-                    opt_level=opt_level,
-                    config=config,
-                    disabled_pass=disabled_pass,
-                    instruments=instruments,
-                ):
+            else:
+                with autotvm.apply_history_best(tuning_records):
                     logger.debug("building relay graph with tuning records")
                     graph_module = build(
                         mod,
@@ -355,10 +348,7 @@
                         mod_name=mod_name,
                         workspace_pools=workspace_pools,
                     )
-    else:
-        with tvm.transform.PassContext(
-            opt_level=opt_level, config=config, disabled_pass=disabled_pass, instruments=instruments
-        ):
+        else:
             logger.debug("building relay graph (no tuning records provided)")
             graph_module = build(
                 mod,
@@ -371,32 +361,32 @@
                 workspace_pools=workspace_pools,
             )

-    # Generate output dump files with sources
-    if dump_code is None:
-        dump_code = []
-    if not isinstance(dump_code, list):
-        dump_code = [dump_code]
-    dumps = {}
-    for source_type in dump_code:
-        if use_vm:
-            lib = graph_module.lib
-        else:
-            lib = graph_module.get_lib()
-        # TODO lib.get_source call have inconsistent behavior for unsupported
-        # formats (@leandron).
-        source = str(mod) if source_type == "relay" else lib.get_source(source_type)
-        dumps[source_type] = source
-
-    # Create a new tvmc model package object from the graph definition.
-    package_path = tvmc_model.export_package(
-        graph_module, package_path, cross, cross_options, output_format
-    )
-
-    # Write dumps to file.
-    if dumps:
-        save_dumps(package_path, dumps)
-
-    return TVMCPackage(package_path)
+        # Generate output dump files with sources
+        if dump_code is None:
+            dump_code = []
+        if not isinstance(dump_code, list):
+            dump_code = [dump_code]
+        dumps = {}
+        for source_type in dump_code:
+            if use_vm:
+                lib = graph_module.lib
+            else:
+                lib = graph_module.get_lib()
+            # TODO lib.get_source call have inconsistent behavior for unsupported
+            # formats (@leandron).
+            source = str(mod) if source_type == "relay" else lib.get_source(source_type)
+            dumps[source_type] = source
+
+        # Create a new tvmc model package object from the graph definition.
+        package_path = tvmc_model.export_package(
+            graph_module, package_path, cross, cross_options, output_format
+        )
+
+        # Write dumps to file.
+        if dumps:
+            save_dumps(package_path, dumps)
+
+        return TVMCPackage(package_path)


 def build(
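Because the whole of `compile_model` now executes under the user-supplied context, instruments passed in also observe layout conversion and partitioning, not just the final build. A sketch under that assumption (the instrument class and model file are illustrative):

    import tvm
    from tvm.driver import tvmc

    @tvm.instrument.pass_instrument
    class PassLogger:
        """Illustrative instrument recording the name of each pass that runs."""

        def __init__(self):
            self.seen = []

        def run_before_pass(self, mod, info):
            self.seen.append(info.name)

    pass_logger = PassLogger()
    model = tvmc.load("my_model.onnx")  # illustrative input file
    package = tvmc.compile(
        model,
        target="llvm",
        desired_layout="NHWC",
        instruments=[pass_logger],
    )
    # Passes run by convert_graph_layout (e.g. ConvertLayout) now show up as well.
    print(pass_logger.seen)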
python/tvm/driver/tvmc/transform.py (11 changes: 4 additions & 7 deletions)
@@ -54,10 +54,7 @@ def convert_graph_layout(mod, desired_layout):
         ]
     )

-    with transform.PassContext(opt_level=3):
-        try:
-            return seq(mod)
-        except Exception as err:
-            raise TVMCException(
-                "Error converting layout to {0}: {1}".format(desired_layout, str(err))
-            )
+    try:
+        return seq(mod)
+    except Exception as err:
+        raise TVMCException("Error converting layout to {0}: {1}".format(desired_layout, str(err)))