Skip to content

Commit

Permalink
[REFACTOR][TIR] Migrate BuildConfig to PassContext.
Browse files Browse the repository at this point in the history
This PR migrates the TIR configurations from BuildConfig to the
PassContext used by the unified IR.
Moving forward, PassContext will be the unified way to configure passes in the TVM stack.

Changes

- Refactored TVM_PASS_REGISTER_CONFIG_OPTION to take in the reference type.
- Removed BuildConfig.
- Migrated the passes to use PassContext.
  • Loading branch information
tqchen committed May 25, 2020
1 parent 0833b07 commit 9547e5b
Show file tree
Hide file tree
Showing 58 changed files with 494 additions and 733 deletions.
1 change: 0 additions & 1 deletion apps/lldb/tvm.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ def __lldb_init_module(debugger, _):
"tvm::Attrs",
"tvm::BijectiveLayout",
"tvm::Buffer",
"tvm::BuildConfig",
"tvm::Channel",
"tvm::EnvFunc",
"tvm::Expr",
Expand Down
15 changes: 4 additions & 11 deletions include/tvm/driver/driver_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,23 +48,20 @@ namespace tvm {
* \param args The arguments to the function.
* \param name The name of the lowered function.
* \param binds Buffer assignments.
* \param config The build configuration.
* \return The result module.
*/
TVM_DLL IRModule lower(te::Schedule sch, const Array<te::Tensor>& args, const std::string& name,
const std::unordered_map<te::Tensor, tir::Buffer>& binds,
const BuildConfig& config);
const std::unordered_map<te::Tensor, tir::Buffer>& binds);

/*!
* \brief Build a device and host module for a specific target from an IRModule.
* \param funcs The functions to be built.
* \param target The target device to build for.
* \param target_host The target for building host code. To use the default, pass Target()
* \param config The build configuration.
* \return The built module.
*/
TVM_DLL runtime::Module build(const IRModule& funcs, const Target& target,
const Target& target_host, const BuildConfig& config);
const Target& target_host);

/*!
* \brief Build a device and host module for a specific target from a map
Expand All @@ -73,11 +70,9 @@ TVM_DLL runtime::Module build(const IRModule& funcs, const Target& target,
* \param input The map contains target to an IRModule.
* \param target_host The target for building host code. To use the default,
* pass Target().
* \param config The build configuration.
* \return The built module that contains code for different processors.
*/
TVM_DLL runtime::Module build(const Map<Target, IRModule>& input, const Target& target_host,
const BuildConfig& config);
TVM_DLL runtime::Module build(const Map<Target, IRModule>& input, const Target& target_host);

/*!
* \brief Build a device and host module for a specific target from a map
Expand All @@ -86,11 +81,9 @@ TVM_DLL runtime::Module build(const Map<Target, IRModule>& input, const Target&
* \param input The map contains target string to an IRModule.
* \param target_host The target for building host code. To use the default,
* pass Target().
* \param config The build configuration.
* \return The built module that contains code for different processors.
*/
TVM_DLL runtime::Module build(const Map<std::string, IRModule>& input, const Target& target_host,
const BuildConfig& config);
TVM_DLL runtime::Module build(const Map<std::string, IRModule>& input, const Target& target_host);
} // namespace tvm

#endif // TVM_DRIVER_DRIVER_API_H_
13 changes: 13 additions & 0 deletions include/tvm/ir/attrs.h
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,19 @@ class DictAttrs : public Attrs {
TVM_DEFINE_OBJECT_REF_COW_METHOD(DictAttrsNode);
};

/*!
 * \brief Create an Attrs object with all default values.
 * \tparam TAttrs the attribute reference type to be created.
 * \return An instance that will represent None.
*/
template <typename TAttrs>
inline TAttrs AttrsWithDefaultValues() {
static_assert(std::is_base_of<Attrs, TAttrs>::value, "Can only take attr nodes");
auto n = make_object<typename TAttrs::ContainerType>();
n->InitByPackedArgs(runtime::TVMArgs(nullptr, nullptr, 0), false);
return TAttrs(n);
}

// Namespace containing detail implementations
namespace detail {
using runtime::TVMArgValue;
Expand Down
5 changes: 3 additions & 2 deletions include/tvm/ir/transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,10 +208,11 @@ class PassContext : public ObjectRef {
* \brief Register a valid configuration option and its ValueType for validation.
*
* \param key The configuration key.
* \tparam ValueNodeType The value type to be registered
* \tparam ValueType The value type to be registered
*/
template <typename ValueNodeType>
template <typename ValueType>
static uint32_t RegisterConfigOption(const char* key) {
using ValueNodeType = typename ValueType::ContainerType;
// NOTE: we could further update the function later.
uint32_t tindex = ValueNodeType::_GetOrAllocRuntimeTypeIndex();
RegisterConfigOption(key, tindex);
Expand Down
104 changes: 0 additions & 104 deletions include/tvm/target/target.h
Original file line number Diff line number Diff line change
Expand Up @@ -172,109 +172,5 @@ TVM_DLL Target ext_dev(const std::vector<std::string>& options = std::vector<std
TVM_DLL Target hexagon(const std::vector<std::string>& options = std::vector<std::string>());
} // namespace target

/*!
* \brief Container for build configuration options
*/
class BuildConfigNode : public Object {
public:
/*!
* \brief Splitting factor for loop splitting. If this is set to zero, no splitting will be
* done. Otherwise, a split will be done with this factor and the inner loop will be unrolled.
*/
int double_buffer_split_loop = 1;
/*! \brief Threshold of number of steps in the loop to be automatically unrolled */
int auto_unroll_max_step = 0;
/*! \brief The maximum nested level of loops that can be automatically unrolled */
int auto_unroll_max_depth = 8;
/*! \brief The maximum extent of loop that will be unrolled */
int auto_unroll_max_extent = 0;
/*!
* \brief Whether to explicitly unroll the loop. If set to false, the unroll hint will
* be passed to the CodeGen phase. Set to true if CodeGen supports unroll pragma.
*/
bool unroll_explicit = true;

/*! \brief Set to true if buffer arguments do not overlap. This enables more optimization. */
bool restricted_func = true;

/*! \brief Whether to detect global barrier */
bool detect_global_barrier = false;

/*! \brief Whether to partition const loop */
bool partition_const_loop = false;

/*! \brief List of passes to be injected into the low-level pipeline. */
std::vector<std::pair<int, transform::Pass>> add_lower_pass;

/*! \brief Whether to instrument loads and stores with check for out of the bounds. */
bool instrument_bound_checkers = false;

/*! \brief Whether to disable select rewriting. */
bool disable_select_rewriting = false;

/*! \brief Whether to disable loop vectorization. */
bool disable_vectorize = false;

/*! \brief Whether to disable assert stmt generation. */
bool disable_assert = false;

void VisitAttrs(AttrVisitor* v) {
v->Visit("double_buffer_split_loop", &double_buffer_split_loop);
v->Visit("auto_unroll_max_step", &auto_unroll_max_step);
v->Visit("auto_unroll_max_depth", &auto_unroll_max_depth);
v->Visit("auto_unroll_max_extent", &auto_unroll_max_extent);
v->Visit("unroll_explicit", &unroll_explicit);
v->Visit("restricted_func", &restricted_func);
v->Visit("detect_global_barrier", &detect_global_barrier);
v->Visit("partition_const_loop", &partition_const_loop);
v->Visit("instrument_bound_checkers", &instrument_bound_checkers);
v->Visit("disable_select_rewriting", &disable_select_rewriting);
v->Visit("disable_vectorize", &disable_vectorize);
v->Visit("disable_assert", &disable_assert);
}

static constexpr const char* _type_key = "BuildConfig";
TVM_DECLARE_FINAL_OBJECT_INFO(BuildConfigNode, Object);
};

/*!
* \brief Build configuration for compilations.
*/
class BuildConfig : public ::tvm::ObjectRef {
public:
BuildConfig() {}
explicit BuildConfig(ObjectPtr<Object> n) : ObjectRef(n) {}
const BuildConfigNode* operator->() const { return static_cast<const BuildConfigNode*>(get()); }
BuildConfigNode* operator->() { return static_cast<BuildConfigNode*>(get_mutable()); }
/*!
 * \brief Construct a BuildConfig containing an empty build config node.
* \return The new BuildConfig
*/
TVM_DLL static BuildConfig Create();
/*!
* \brief Get the current BuildConfig context from thread local storage, or a default
* configuration if a BuildConfig scope has not been entered.
* \return The configuration that is the current context.
*/
TVM_DLL static BuildConfig Current();

using ContainerType = BuildConfigNode;
class Internal;

private:
// Enable with syntax.
friend class With<BuildConfig>;
/*!
* \brief Push a new BuildConfig context onto the thread local stack.
*/
TVM_DLL void EnterWithScope();

/*!
* \brief Pop a build config off the thread local context stack,
* restoring the previous configuration as the current context.
*/
TVM_DLL void ExitWithScope();
};

} // namespace tvm
#endif // TVM_TARGET_TARGET_H_
15 changes: 3 additions & 12 deletions include/tvm/tir/transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -108,11 +108,9 @@ TVM_DLL Pass LiftAttrScope(std::string attr_key);
/*!
* \brief partition loops in the stmt.
*
* \param split_const_loop flag to enable partition for const loop
*
* \return The pass.
*/
TVM_DLL Pass LoopPartition(bool split_const_loop);
TVM_DLL Pass LoopPartition();

/*!
* \brief Lower vectorization loops.
Expand All @@ -133,10 +131,9 @@ TVM_DLL Pass InjectVirtualThread();
/*!
* \brief Inject double buffer statements.
*
* \param split_loop_factor Loop splitting factor.
* \return The pass.
*/
TVM_DLL Pass InjectDoubleBuffer(int split_loop_factor);
TVM_DLL Pass InjectDoubleBuffer();

/*!
* \brief Rewrite storage allocation pattern.
Expand All @@ -152,15 +149,9 @@ TVM_DLL Pass StorageRewrite();
 * \brief Unroll the constant loops marked by unroll.
 * This pass also automatically attaches the pragma unroll tag to loops which meet the standard.
*
* \param auto_max_step The maximum step before stop attach automatic unroll
* \param auto_max_depth The maximum depth before stop attach automatic unroll
 * \param auto_max_extent The maximum extent of the loop we can unroll;
 * this is a legacy option that does not take the total loop steps into account.
* \param explicit_unroll Whether explicitly unroll the loop, or leave unroll annotation to codegen.
* \return The pass.
*/
TVM_DLL Pass UnrollLoop(int auto_max_step, int auto_max_depth, int auto_max_extent,
bool explicit_unroll);
TVM_DLL Pass UnrollLoop();

/*!
* \brief Remove No Op from the Stmt.
Expand Down
8 changes: 4 additions & 4 deletions python/tvm/autotvm/measure/measure_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,9 @@
import numpy as np

import tvm._ffi
import tvm.ir.transform
from tvm import nd, rpc as _rpc, target as _target
from tvm.error import TVMError
from tvm.target import build_config
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar

Expand Down Expand Up @@ -246,7 +246,7 @@ def get_build_kwargs(self):
if 'cuda' in self.task.target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
if self.task.target.device_name == 'micro_dev':
kwargs.setdefault('build_option', {})['disable_vectorize'] = True
kwargs.setdefault('build_option', {})['tir.disable_vectorize'] = True

return kwargs

Expand Down Expand Up @@ -360,7 +360,7 @@ def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_opti

opts = build_option or {}
if check_gpu: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
opts["tir.add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
if cuda_arch:
set_cuda_target_arch(cuda_arch)

Expand All @@ -371,7 +371,7 @@ def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_opti
import vta
func = vta.build(s, args, target_host=task.target_host)
else:
with build_config(**opts):
with tvm.ir.transform.PassContext(config=opts):
func = build(s, args, target_host=task.target_host)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)

Expand Down
13 changes: 6 additions & 7 deletions python/tvm/autotvm/task/relay_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,13 +41,12 @@ def _lower(mod,
from tvm.relay.backend import graph_runtime_codegen

if hasattr(target, 'device_name') and target.device_name == "vta":
with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
import vta
with vta.build_config():
mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(mod["main"])
return
import vta
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(mod["main"])
return

# default case
# Try graph codegen first to extract autotvm tasks.
Expand Down
Loading

0 comments on commit 9547e5b

Please sign in to comment.