From d1592713c42aabc96c6a1c607a00b0fc6e3b9cb4 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Thu, 16 Jan 2020 10:56:52 -0800
Subject: [PATCH 1/2] [REFACTOR] introduce top - Tensor Operation DSL.

Historically we put Tensor, Schedule and compute under the root tvm
namespace. This is no longer a good idea as the project's scope grows
larger than the tensor operation DSL.

This PR introduces top -- a namespace for tensor operation DSL concepts
such as schedule, tensor and compute. We moved the related files into
the new top subfolder.
---
 include/tvm/arith/bound.h                  | 4 +-
 include/tvm/build_module.h                 | 6 +-
 include/tvm/ir_pass.h                      | 6 +-
 include/tvm/operation.h                    | 3 +-
 include/tvm/packed_func_ext.h              | 4 +-
 include/tvm/relay/op_attr_types.h          | 24 ++--
 include/tvm/schedule.h                     | 3 +-
 include/tvm/schedule_pass.h                | 4 +-
 include/tvm/tensor.h                       | 8 +-
 include/tvm/tensor_intrin.h                | 2 +
 src/api/api_lang.cc                        | 2 +
 src/api/api_pass.cc                        | 4 +-
 src/api/api_schedule.cc                    | 4 +-
 src/arith/domain_touched.cc                | 6 +-
 src/codegen/build_module.cc                | 24 ++--
 src/contrib/hybrid/codegen_hybrid.h        | 1 +
 src/lang/tensor.cc                         | 3 +-
 src/op/compute_op.cc                       | 28 ++--
 src/op/compute_op.h                        | 2 +
 src/op/cross_thread_reduction.cc           | 12 +-
 src/op/extern_op.cc                        | 4 +-
 src/op/hybrid_op.cc                        | 18 ++-
 src/op/hybrid_op.h                         | 4 +-
 src/op/op_util.cc                          | 8 +-
 src/op/op_util.h                           | 4 +-
 src/op/placeholder_op.cc                   | 2 +
 src/op/scan_op.cc                          | 10 +-
 src/op/tensor_compute_op.cc                | 23 ++--
 src/op/tensorize.cc                        | 25 ++--
 src/pass/inject_prefetch.cc                | 2 +-
 src/pass/storage_flatten.cc                | 12 +-
 src/pass/tensor_core.cc                    | 9 +-
 src/relay/backend/compile_engine.cc        | 132 +++++++++--------
 src/relay/backend/compile_engine.h         | 4 +-
 src/relay/op/annotation/annotation.cc      | 30 ++---
 src/relay/op/debug.cc                      | 6 +-
 src/relay/op/memory/memory.cc              | 20 +--
 src/relay/op/nn/nn.cc                      | 28 ++--
 src/relay/op/nn/pad.cc                     | 6 +-
 src/relay/op/nn/pooling.cc                 | 42 +++---
 src/relay/op/tensor/binary.cc              | 8 +-
 src/relay/op/tensor/reduce.cc              | 44 +++---
 src/relay/op/tensor/transform.cc           | 150 ++++++++++-----
 src/relay/op/tensor/unary.cc               | 14 +-
 src/relay/op/vision/yolo.cc                | 4 +-
 src/relay/pass/alter_op_layout.cc          | 4 +-
 src/relay/pass/convert_layout.cc           | 4 +-
 src/schedule/auto_inline_elem_wise.cc      | 4 +-
 src/schedule/bound.cc                      | 4 +-
 src/schedule/graph.cc                      | 12 +-
 src/schedule/graph.h                       | 4 +-
 src/schedule/message_passing.cc            | 4 +-
 src/schedule/message_passing.h             | 4 +-
 src/schedule/schedule_dataflow_rewrite.cc  | 24 ++--
 src/schedule/schedule_lang.cc              | 12 +-
 src/schedule/schedule_ops.cc               | 4 +-
 tests/cpp/build_module_test.cc             | 2 +
 tests/cpp/simple_passes_test.cc            | 2 +-
 tests/cpp/tensor_test.cc                   | 12 +-
 topi/include/topi/broadcast.h              | 40 +++---
 topi/include/topi/contrib/cublas.h         | 1 +
 topi/include/topi/contrib/rocblas.h        | 1 +
 topi/include/topi/cuda/dense.h             | 11 +-
 topi/include/topi/cuda/injective.h         | 5 +-
 topi/include/topi/cuda/normalization.h     | 1 +
 topi/include/topi/cuda/pooling.h           | 1 +
 topi/include/topi/cuda/reduction.h         | 1 +
 topi/include/topi/cuda/softmax.h           | 7 +-
 topi/include/topi/detail/array_utils.h     | 1 +
 topi/include/topi/detail/broadcast.h       | 10 +-
 topi/include/topi/detail/constant_utils.h  | 1 +
 topi/include/topi/detail/extern.h          | 1 +
 topi/include/topi/detail/fuse.h            | 1 +
 topi/include/topi/detail/pad_utils.h       | 1 +
 topi/include/topi/detail/ravel_unravel.h   | 1 +
 topi/include/topi/detail/tensor_utils.h    | 1 +
 topi/include/topi/elemwise.h               | 1 +
 topi/include/topi/generic/default.h        | 3 +-
 topi/include/topi/generic/extern.h         | 3 +-
 topi/include/topi/generic/injective.h      | 5 +-
 topi/include/topi/image/resize.h           | 1 +
 topi/include/topi/nn.h                     | 49 +++----
 topi/include/topi/nn/batch_matmul.h        | 7 +-
 topi/include/topi/nn/bias_add.h            | 4 +-
 topi/include/topi/nn/bnn.h                 | 13 +-
 topi/include/topi/nn/dense.h               | 11 +-
 topi/include/topi/nn/dilate.h              | 3 +-
 topi/include/topi/nn/flatten.h             | 3 +-
 topi/include/topi/nn/l2_normalize.h        | 3 +-
 topi/include/topi/nn/local_response_norm.h | 7 +-
 topi/include/topi/nn/mapping.h             | 5 +-
 topi/include/topi/nn/pooling.h             | 25 ++--
 topi/include/topi/nn/softmax.h             | 15 ++-
 topi/include/topi/nn/upsampling.h          | 1 +
 topi/include/topi/reduction.h              | 7 +-
 topi/include/topi/rocm/dense.h             | 11 +-
 topi/include/topi/rocm/injective.h         | 3 +-
 topi/include/topi/rocm/normalization.h     | 1 +
 topi/include/topi/rocm/pooling.h           | 1 +
 topi/include/topi/rocm/reduction.h         | 1 +
 topi/include/topi/rocm/softmax.h           | 1 +
 topi/include/topi/transform.h              | 11 +-
 topi/include/topi/vision/reorg.h           | 3 +-
 topi/include/topi/x86/bnn.h                | 1 +
 topi/include/topi/x86/default.h            | 3 +-
 topi/include/topi/x86/injective.h          | 5 +-
 topi/src/topi.cc                           | 28 ++--
 107 files changed, 625 insertions(+), 550 deletions(-)

diff --git a/include/tvm/arith/bound.h b/include/tvm/arith/bound.h
index 73c0733c41d3..dcc4364e256b 100644
--- a/include/tvm/arith/bound.h
+++ b/include/tvm/arith/bound.h
@@ -32,7 +32,9 @@
 namespace tvm {
 // forward delcare Tensor
+namespace top {
 class Tensor;
+}
 
 namespace arith {
 /*!
@@ -75,7 +77,7 @@ IntSet DeduceBound(PrimExpr v, PrimExpr cond,
  * \param consider_provides If provides (write) are considered.
  * \return The domain that covers all the calls or provides within the given statement.
  */
-Domain DomainTouched(Stmt body, const Tensor &tensor, bool consider_calls, bool consider_provides);
+Domain DomainTouched(Stmt body, const top::Tensor &tensor, bool consider_calls, bool consider_provides);
 
 }  // namespace arith
 }  // namespace tvm
diff --git a/include/tvm/build_module.h b/include/tvm/build_module.h
index 29903786a2b3..7a09bdcadaf8 100644
--- a/include/tvm/build_module.h
+++ b/include/tvm/build_module.h
@@ -172,10 +172,10 @@ class BuildConfig : public ::tvm::ObjectRef {
 * \param config The build configuration.
 * \return The lowered function.
 */
-TVM_DLL Array<LoweredFunc> lower(Schedule sch,
-                                 const Array<Tensor>& args,
+TVM_DLL Array<LoweredFunc> lower(top::Schedule sch,
+                                 const Array<top::Tensor>& args,
                                  const std::string& name,
-                                 const std::unordered_map<Tensor, Buffer>& binds,
+                                 const std::unordered_map<top::Tensor, Buffer>& binds,
                                  const BuildConfig& config);
 /*!
 * \brief Split host/device function and running necessary pass before build
diff --git a/include/tvm/ir_pass.h b/include/tvm/ir_pass.h
index 891d3245cff7..4b9638cd221d 100644
--- a/include/tvm/ir_pass.h
+++ b/include/tvm/ir_pass.h
@@ -203,7 +203,7 @@ Stmt Inline(Stmt stmt,
 * \return Transformed stmt.
 */
 Stmt StorageFlatten(Stmt stmt,
-                    Map<Tensor, Buffer> extern_buffer,
+                    Map<top::Tensor, Buffer> extern_buffer,
                     int cache_line_size,
                     bool create_bound_attribute = false);
@@ -217,8 +217,8 @@ Stmt StorageFlatten(Stmt stmt,
 * \return Transformed stmt.
 */
 Stmt RewriteForTensorCore(Stmt stmt,
-                          Schedule schedule,
-                          Map<Tensor, Buffer> extern_buffer);
+                          top::Schedule schedule,
+                          Map<top::Tensor, Buffer> extern_buffer);
 /*!
 * \brief Verify if there is any argument bound to compact buffer.
diff --git a/include/tvm/operation.h b/include/tvm/operation.h
index 54c98a38991c..34b8a621cce3 100644
--- a/include/tvm/operation.h
+++ b/include/tvm/operation.h
@@ -37,7 +37,7 @@
 #include "buffer.h"
 
 namespace tvm {
-
+namespace top {
 using arith::IntSet;
 
 /*!
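
The new lower() signature above is representative of the whole patch: schedules and tensors are now spelled top::Schedule and top::Tensor. Below is a minimal illustrative sketch (not part of the patch) of a caller after the change; the function name, tensor names, and the assumption that the tvm/operation.h and tvm/build_module.h include paths are unchanged are all hypothetical.

// Hedged sketch: build a 1-D "add one" kernel and lower it with the
// updated lower() signature shown above. Names here are illustrative only.
#include <tvm/operation.h>
#include <tvm/build_module.h>
#include <unordered_map>

tvm::Array<tvm::LoweredFunc> BuildAddOne() {
  using namespace tvm;
  Var n("n");
  top::Tensor A = top::placeholder({n}, DataType::Float(32), "A");
  top::Tensor B = top::compute(
      {n}, [&](Var i) { return A(i) + 1.0f; }, "B");
  top::Schedule s = top::create_schedule({B->op});
  // binds is now keyed by top::Tensor (std::hash<tvm::top::Tensor> is provided).
  std::unordered_map<top::Tensor, Buffer> binds;
  BuildConfig config = BuildConfig::Create();
  return lower(s, {A, B}, "add_one", binds, config);
}
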
@@ -655,5 +655,6 @@ inline Tensor compute(Array shape, inline const OperationNode* Operation::operator->() const { return static_cast(get()); } +} // namspace top } // namespace tvm #endif // TVM_OPERATION_H_ diff --git a/include/tvm/packed_func_ext.h b/include/tvm/packed_func_ext.h index cc380cdbd8aa..32364e41629c 100644 --- a/include/tvm/packed_func_ext.h +++ b/include/tvm/packed_func_ext.h @@ -116,8 +116,8 @@ inline TVMPODValue_::operator tvm::PrimExpr() const { if (ptr->IsInstance()) { return IterVar(ObjectPtr(ptr))->var; } - if (ptr->IsInstance()) { - return Tensor(ObjectPtr(ptr))(); + if (ptr->IsInstance()) { + return top::Tensor(ObjectPtr(ptr))(); } CHECK(ObjectTypeChecker::Check(ptr)) << "Expect type " << ObjectTypeChecker::TypeName() diff --git a/include/tvm/relay/op_attr_types.h b/include/tvm/relay/op_attr_types.h index b6221e0ba8a5..660ddc9d0eae 100644 --- a/include/tvm/relay/op_attr_types.h +++ b/include/tvm/relay/op_attr_types.h @@ -99,10 +99,10 @@ using TShapeDataDependant = bool; * \return The output compute description of the operator. */ using FTVMCompute = runtime::TypedPackedFunc< - Array(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target)>; + Array(const Attrs& attrs, + const Array& inputs, + const Type& out_type, + const Target& target)>; /*! * \brief Build the computation schedule for @@ -114,9 +114,9 @@ using FTVMCompute = runtime::TypedPackedFunc< * \return schedule The computation schedule. */ using FTVMSchedule = runtime::TypedPackedFunc< - Schedule(const Attrs& attrs, - const Array& outs, - const Target& target)>; + top::Schedule(const Attrs& attrs, + const Array& outs, + const Target& target)>; /*! * \brief Alternate the layout of operators or replace the @@ -131,7 +131,7 @@ using FTVMSchedule = runtime::TypedPackedFunc< using FTVMAlterOpLayout = runtime::TypedPackedFunc< Expr(const Attrs& attrs, const Array& args, - const Array& tinfos)>; + const Array& tinfos)>; /*! * \brief Convert the layout of operators or replace the @@ -147,7 +147,7 @@ using FTVMAlterOpLayout = runtime::TypedPackedFunc< using FTVMConvertOpLayout = runtime::TypedPackedFunc< Expr(const Attrs& attrs, const Array& args, - const Array& tinfos, + const Array& tinfos, const std::string& desired_layout)>; /*! * \brief Legalizes an expression with another expression. This function will be @@ -206,9 +206,9 @@ enum AnyCodegenStrategy { using Shape = Array; using FShapeFunc = runtime::TypedPackedFunc< - Array(const Attrs& attrs, - const Array& inputs, - const Array& out_ndims)>; + Array(const Attrs& attrs, + const Array& inputs, + const Array& out_ndims)>; } // namespace relay } // namespace tvm diff --git a/include/tvm/schedule.h b/include/tvm/schedule.h index 3115b0a5ac7d..7444076c06bc 100644 --- a/include/tvm/schedule.h +++ b/include/tvm/schedule.h @@ -32,7 +32,7 @@ #include "tensor_intrin.h" namespace tvm { - +namespace top { // Node container for Stage class StageNode; // Node container for Schedule @@ -764,5 +764,6 @@ inline const IterVarRelationNode* IterVarRelation::operator->() const { inline const IterVarAttrNode* IterVarAttr::operator->() const { return static_cast(get()); } +} // namespace top } // namespace tvm #endif // TVM_SCHEDULE_H_ diff --git a/include/tvm/schedule_pass.h b/include/tvm/schedule_pass.h index af2459bb2c8e..de433319086f 100644 --- a/include/tvm/schedule_pass.h +++ b/include/tvm/schedule_pass.h @@ -31,7 +31,7 @@ #include "schedule.h" namespace tvm { -namespace schedule { +namespace top { /*! 
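
With FTVMCompute and FTVMSchedule now defined over top::Tensor and top::Schedule, a relay op registration takes the shape below. This is an illustrative sketch only: "my_identity" is a hypothetical op name, and the topi calls mirror the patterns used elsewhere in this patch.

// Hedged sketch of a relay op registration after the namespace change.
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <topi/elemwise.h>
#include <topi/generic/injective.h>

namespace tvm {
namespace relay {

// "my_identity" is a placeholder op name for illustration.
RELAY_REGISTER_OP("my_identity")
.set_attr<FTVMCompute>("FTVMCompute",
  [](const Attrs& attrs, const Array<top::Tensor>& inputs,
     const Type& out_type, const Target& target) -> Array<top::Tensor> {
    return { topi::identity(inputs[0]) };
  })
.set_attr<FTVMSchedule>("FTVMSchedule",
  [](const Attrs& attrs, const Array<top::Tensor>& outs,
     const Target& target) -> top::Schedule {
    return topi::generic::schedule_injective(target, outs);
  });

}  // namespace relay
}  // namespace tvm
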
* \brief Infer the bound of all iteration variables relates to the schedule. @@ -71,6 +71,6 @@ void AutoInlineElemWise(Schedule sch); */ TVM_DLL void AutoInlineInjective(Schedule sch); -} // namespace schedule +} // namespace top } // namespace tvm #endif // TVM_SCHEDULE_PASS_H_ diff --git a/include/tvm/tensor.h b/include/tvm/tensor.h index 91c0c96b94ef..d8aeba7bf504 100644 --- a/include/tvm/tensor.h +++ b/include/tvm/tensor.h @@ -36,6 +36,7 @@ #include "expr_operator.h" namespace tvm { +namespace top { // Internal node container of Tensor class TensorNode; @@ -246,16 +247,17 @@ DEFINE_OVERLOAD_SLICE_BINARY_OP(<<); DEFINE_OVERLOAD_SLICE_BINARY_OP(>); // NOLINT(*) DEFINE_OVERLOAD_SLICE_BINARY_OP(<); // NOLINT(*) +} // namespace top } // namespace tvm namespace std { template <> -struct hash<::tvm::Operation> : public ::tvm::ObjectHash { +struct hash<::tvm::top::Operation> : public ::tvm::ObjectHash { }; template <> -struct hash<::tvm::Tensor> { - std::size_t operator()(const ::tvm::Tensor& k) const { +struct hash<::tvm::top::Tensor> { + std::size_t operator()(const ::tvm::top::Tensor& k) const { ::tvm::ObjectHash hasher; if (k.defined() && k->op.defined()) { return hasher(k->op); diff --git a/include/tvm/tensor_intrin.h b/include/tvm/tensor_intrin.h index 879e206c1365..6367f07fe305 100644 --- a/include/tvm/tensor_intrin.h +++ b/include/tvm/tensor_intrin.h @@ -29,6 +29,7 @@ #include "buffer.h" namespace tvm { +namespace top { // Internal node container of tensor intrinsics. class TensorIntrinNode; @@ -173,5 +174,6 @@ inline const TensorIntrinCallNode* TensorIntrinCall::operator->() const { return static_cast(get()); } +} // namespace top } // namespace tvm #endif // TVM_TENSOR_INTRIN_H_ diff --git a/src/api/api_lang.cc b/src/api/api_lang.cc index 2c7eb3cc0270..447254840409 100644 --- a/src/api/api_lang.cc +++ b/src/api/api_lang.cc @@ -274,6 +274,7 @@ TVM_REGISTER_GLOBAL("_BijectiveLayoutForwardShape") TVM_REGISTER_GLOBAL("_BijectiveLayoutBackwardShape") .set_body_method(&BijectiveLayout::BackwardShape); +namespace top { TVM_REGISTER_GLOBAL("_Tensor") .set_body_typed(TensorNode::make); @@ -441,6 +442,7 @@ TVM_REGISTER_GLOBAL("_ScheduleCacheWrite") TVM_REGISTER_GLOBAL("_ScheduleRFactor") .set_body_method(&Schedule::rfactor); +} // namespace top TVM_REGISTER_GLOBAL("_CommReducerCombine") .set_body_method(&ir::CommReducerNode::operator()); diff --git a/src/api/api_pass.cc b/src/api/api_pass.cc index 639855c2a441..a822cc140217 100644 --- a/src/api/api_pass.cc +++ b/src/api/api_pass.cc @@ -96,7 +96,9 @@ TVM_REGISTER_GLOBAL("ir_pass.StorageFlatten") TVM_REGISTER_GLOBAL("ir_pass.RewriteForTensorCore") .set_body_typed - ([](const Stmt& stmt, const Schedule& schedule, const Map& extern_buffer) { + ([](const Stmt& stmt, + const top::Schedule& schedule, + const Map& extern_buffer) { return RewriteForTensorCore(stmt, schedule, extern_buffer); }); diff --git a/src/api/api_schedule.cc b/src/api/api_schedule.cc index a7c27e4c8e47..1782920b22a4 100644 --- a/src/api/api_schedule.cc +++ b/src/api/api_schedule.cc @@ -31,7 +31,7 @@ #include "../schedule/graph.h" namespace tvm { -namespace schedule { +namespace top { TVM_REGISTER_GLOBAL("schedule.AutoInlineElemWise") .set_body_typed(AutoInlineElemWise); @@ -60,5 +60,5 @@ REGISTER_SCHEDULE_PASS(CreateAttachPath); REGISTER_SCHEDULE_PASS(ScanGetBody); REGISTER_SCHEDULE_PASS(ScanFixPointAnalysis); -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/src/arith/domain_touched.cc b/src/arith/domain_touched.cc index 3889cd2cf918..b1da178d6a42 
100644 --- a/src/arith/domain_touched.cc +++ b/src/arith/domain_touched.cc @@ -40,7 +40,7 @@ using namespace ir; // Find Read region of the tensor in the stmt. class FuncTouchedDomain final : public StmtExprVisitor { public: - FuncTouchedDomain(const Tensor &tensor, bool consider_calls, bool consider_provides) + FuncTouchedDomain(const top::Tensor &tensor, bool consider_calls, bool consider_provides) : tensor_(tensor), consider_calls_(consider_calls), consider_provides_(consider_provides) {} Domain Find(const Stmt& stmt) { @@ -108,13 +108,13 @@ class FuncTouchedDomain final : public StmtExprVisitor { } } - const Tensor &tensor_; + const top::Tensor &tensor_; bool consider_calls_, consider_provides_; std::vector > bounds_; std::unordered_map dom_map_; }; -Domain DomainTouched(Stmt stmt, const Tensor &tensor, bool consider_calls, bool consider_provides) { +Domain DomainTouched(Stmt stmt, const top::Tensor &tensor, bool consider_calls, bool consider_provides) { return FuncTouchedDomain(tensor, consider_calls, consider_provides).Find(stmt); } diff --git a/src/codegen/build_module.cc b/src/codegen/build_module.cc index 771583b66a74..06f8c139008b 100644 --- a/src/codegen/build_module.cc +++ b/src/codegen/build_module.cc @@ -87,10 +87,10 @@ Buffer BufferWithOffsetAlignment(Array shape, data_alignment, offset_factor, buffer_type); } -void GetBinds(const Array& args, +void GetBinds(const Array& args, bool compact, - const std::unordered_map& binds, - Map* out_binds, + const std::unordered_map& binds, + Map* out_binds, Array* out_arg_list, const BuildConfig& config) { *out_binds = binds; @@ -117,21 +117,21 @@ void GetBinds(const Array& args, * \param config The build configuration. * \return The built Stmt. */ -Stmt BuildStmt(Schedule sch, - const Array& args, - const std::unordered_map& binds, +Stmt BuildStmt(top::Schedule sch, + const Array& args, + const std::unordered_map& binds, bool loop_partition, Array *out_arg_list, const BuildConfig& config) { sch = sch.normalize(); // Phase 0 - auto bounds = schedule::InferBound(sch); - auto stmt = schedule::ScheduleOps(sch, bounds, false); + auto bounds = top::InferBound(sch); + auto stmt = top::ScheduleOps(sch, bounds, false); stmt = ir::InjectPrefetch(stmt); bool compact = ir::VerifyCompactBuffer(stmt); - Map out_binds; + Map out_binds; GetBinds(args, compact, binds, &out_binds, out_arg_list, config); // Phase 1 @@ -165,10 +165,10 @@ Stmt BuildStmt(Schedule sch, return stmt; } -Array lower(Schedule sch, - const Array& args, +Array lower(top::Schedule sch, + const Array& args, const std::string& name, - const std::unordered_map& binds, + const std::unordered_map& binds, const BuildConfig& config) { Array out_arg_list; auto stmt = BuildStmt(sch, args, binds, true, &out_arg_list, config); diff --git a/src/contrib/hybrid/codegen_hybrid.h b/src/contrib/hybrid/codegen_hybrid.h index 33bd0efae8a4..66f8ee923bce 100644 --- a/src/contrib/hybrid/codegen_hybrid.h +++ b/src/contrib/hybrid/codegen_hybrid.h @@ -38,6 +38,7 @@ namespace tvm { namespace contrib { +using namespace top; using namespace ir; /*! * \brief A base class to generate Hybrid Script. 
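
BuildStmt above now reaches the bound-inference and loop-nest-creation passes as top::InferBound and top::ScheduleOps instead of schedule::*. A minimal sketch of that first lowering phase follows, assuming only that tvm/schedule_pass.h keeps its current path; the helper name is hypothetical.

// Hedged sketch mirroring the first phase of BuildStmt.
#include <tvm/schedule_pass.h>

tvm::Stmt ScheduleToStmt(tvm::top::Schedule sch) {
  sch = sch.normalize();
  // Both passes moved from tvm::schedule to tvm::top.
  tvm::Map<tvm::IterVar, tvm::Range> bounds = tvm::top::InferBound(sch);
  return tvm::top::ScheduleOps(sch, bounds, /*debug_keep_trivial_loop=*/false);
}
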
diff --git a/src/lang/tensor.cc b/src/lang/tensor.cc index 35b40295e7a9..9821ee2ed56d 100644 --- a/src/lang/tensor.cc +++ b/src/lang/tensor.cc @@ -26,7 +26,7 @@ #include namespace tvm { - +namespace top { // Tensor PrimExpr Tensor::operator()(Array indices) const { Array arr(indices.begin(), indices.end()); @@ -132,4 +132,5 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) TVM_REGISTER_NODE_TYPE(TensorIntrinCallNode); +} // namespace top } // namespace tvm diff --git a/src/op/compute_op.cc b/src/op/compute_op.cc index b82bab599b3e..b2d6d74f3e77 100644 --- a/src/op/compute_op.cc +++ b/src/op/compute_op.cc @@ -36,7 +36,7 @@ #include "../arith/interval_set.h" namespace tvm { - +namespace top { using namespace ir; TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -184,7 +184,7 @@ Operation ComputeOpNode::ReplaceInputs( if (this->body[0]->IsInstance()) { // Specially handle reduce so the replaced op // still share all the components - PrimExpr new_reduce = op::ReplaceTensor(this->body[0], rmap); + PrimExpr new_reduce = top::ReplaceTensor(this->body[0], rmap); if (!new_reduce.same_as(this->body[0])) { const ir::ReduceNode* r = new_reduce.as(); for (size_t k = 0; k < this->body.size(); ++k) { @@ -198,7 +198,7 @@ Operation ComputeOpNode::ReplaceInputs( } } else { arr = UpdateArray(this->body, [&rmap] (const PrimExpr& e) { - return op::ReplaceTensor(e, rmap); + return top::ReplaceTensor(e, rmap); }); } if (!arr.same_as(this->body)) { @@ -363,8 +363,8 @@ Stmt MakeComputeStmt(const ComputeOpNode* self, // grab the nest structure ComputeLoopNest n = ComputeLoopNest::make(self, stage, dom_map, debug_keep_trivial_loop); // Normal loop structure - n.init_nest.emplace_back(op::MakeIfNest(n.init_predicates)); - n.main_nest.emplace_back(op::MakeIfNest(n.main_predicates)); + n.init_nest.emplace_back(MakeIfNest(n.init_predicates)); + n.main_nest.emplace_back(MakeIfNest(n.main_predicates)); if (self->reduce_axis.size() != 0) { // make reduction. Stmt init, provide; @@ -374,7 +374,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self, } MakeReduction(self, source, &init, &provide); init = MergeNest(n.init_nest, init); - init = op::Substitute(init, n.init_vmap); + init = Substitute(init, n.init_vmap); // common nest std::vector > common( n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1); @@ -388,7 +388,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self, } // run substitution in the on the full nest, because loop condition // could depend on outer loops. - return op::Substitute(provide, n.main_vmap); + return Substitute(provide, n.main_vmap); } else { std::vector provides; for (size_t i = 0; i < self->body.size(); ++i) { @@ -398,7 +398,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self, provide = MergeNest(n.main_nest, provide); // run substitution in the on the full nest, because loop condition // could depend on outer loops. 
- return op::Substitute(provide, n.main_vmap); + return Substitute(provide, n.main_vmap); } } @@ -472,10 +472,10 @@ ComputeLoopNest ComputeLoopNest::make( CHECK_EQ(stage->op.operator->(), self); ComputeLoopNest ret; // make main loop nest - ret.main_nest = op::MakeLoopNest( + ret.main_nest = MakeLoopNest( stage, dom_map, 0, false, std::unordered_set(), &ret.main_vmap, debug_keep_trivial_loop); - ret.main_predicates = schedule::MakeBoundCheck( + ret.main_predicates = MakeBoundCheck( stage, dom_map, ret.main_vmap, false, std::unordered_set()); for (auto& e : ret.main_predicates) { @@ -495,7 +495,7 @@ ComputeLoopNest ComputeLoopNest::make( update_state[self->axis[i]] = 1; } // find which iter var is related to reduction and which is related to axis. - schedule::PassDownBitMaskOr(stage, &update_state); + top::PassDownBitMaskOr(stage, &update_state); auto leaf_iter_vars = stage->leaf_iter_vars; // first first loop that is related to reduction. size_t begin_loop = leaf_iter_vars.size(); @@ -514,10 +514,10 @@ ComputeLoopNest ComputeLoopNest::make( int flag = kv.second; if (flag == 2) skip_iter.insert(kv.first); } - ret.init_nest = op::MakeLoopNest( + ret.init_nest = MakeLoopNest( stage, dom_map, begin_loop, true, skip_iter, &(ret.init_vmap), debug_keep_trivial_loop); - ret.init_predicates = schedule::MakeBoundCheck( + ret.init_predicates = MakeBoundCheck( stage, dom_map, ret.init_vmap, true, skip_iter); for (auto& e : ret.init_predicates) { e = likely(e); @@ -637,4 +637,6 @@ Stmt TransformUpdate(const Stage& stage, return IfThenElseNode::make(arith::ComputeReduce(conds, const_true(1)), update, body); } + +} // namespace top } // namespace tvm diff --git a/src/op/compute_op.h b/src/op/compute_op.h index 3fe98e869541..3f9a98ab0be0 100644 --- a/src/op/compute_op.h +++ b/src/op/compute_op.h @@ -31,6 +31,7 @@ #include namespace tvm { +namespace top { // loop nest structure for general compute // This the loop nest structured used in compute. // Does not include the loop body. 
@@ -106,6 +107,7 @@ Stmt TransformUpdate(const Stage& stage, const ComputeLoopNest& n, Stmt body, Stmt update); +} // namespace top } // namespace tvm #endif // TVM_OP_COMPUTE_OP_H_ diff --git a/src/op/cross_thread_reduction.cc b/src/op/cross_thread_reduction.cc index 9de4bde19b17..bf5c9b167b3a 100644 --- a/src/op/cross_thread_reduction.cc +++ b/src/op/cross_thread_reduction.cc @@ -26,6 +26,7 @@ #include "op_util.h" namespace tvm { +namespace top { using namespace ir; Stmt MakeCrossThreadReduction( @@ -38,9 +39,9 @@ Stmt MakeCrossThreadReduction( args.push_back(iv->var); } std::unordered_map value_map; - auto nest = op::MakeLoopNest( + auto nest = MakeLoopNest( stage, dom_map, 0, false, std::unordered_set(), &value_map, debug_keep_trivial_loop); - auto conds = schedule::MakeBoundCheck( + auto conds = MakeBoundCheck( stage, dom_map, value_map, false, std::unordered_set()); @@ -101,8 +102,8 @@ Stmt MakeCrossThreadReduction( LoadNode::make(t, res_handles[idx], 0, const_true(t.lanes())), args); } Stmt assign_body = SeqStmt::Flatten(assigns); - assign_body = MergeNest(op::MakeIfNest(thread_head_check), assign_body); - assign_body = MergeNest(op::MakeIfNest(conds), assign_body); + assign_body = MergeNest(MakeIfNest(thread_head_check), assign_body); + assign_body = MergeNest(MakeIfNest(conds), assign_body); Stmt body = SeqStmt::Flatten(reduce_body, assign_body); for (size_t idx = size; idx != 0; --idx) { body = AllocateNode::make( @@ -110,7 +111,8 @@ Stmt MakeCrossThreadReduction( body = AttrStmtNode::make( res_handles[idx - 1], attr::storage_scope, StringImmNode::make("local"), body); } - body = op::Substitute(body, value_map); + body = Substitute(body, value_map); return MergeNest(nest, body); } +} // namespace top } // namespace tvm diff --git a/src/op/extern_op.cc b/src/op/extern_op.cc index fb9f49162be5..56443a5c335a 100644 --- a/src/op/extern_op.cc +++ b/src/op/extern_op.cc @@ -28,6 +28,7 @@ #include "op_util.h" namespace tvm { +namespace top { using namespace ir; // ExternOpNode TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -94,7 +95,7 @@ Operation ExternOpNode::ReplaceInputs( const std::unordered_map& rmap) const { CHECK_EQ(self.operator->(), this); auto n = make_object(*this); - n->body = op::ReplaceTensor(this->body, rmap); + n->body = ReplaceTensor(this->body, rmap); for (size_t i = 0; i < n->inputs.size(); ++i) { Tensor t = n->inputs[i]; if (rmap.count(t)) { @@ -181,4 +182,5 @@ Stmt ExternOpNode::BuildProvide( } return ret; } +} // namespace top } // namespace tvm diff --git a/src/op/hybrid_op.cc b/src/op/hybrid_op.cc index 8687ad661582..f01f7458d9e7 100644 --- a/src/op/hybrid_op.cc +++ b/src/op/hybrid_op.cc @@ -34,6 +34,7 @@ #include "hybrid_op.h" namespace tvm { +namespace top { using namespace ir; // HybridOpNode TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -76,7 +77,7 @@ Operation HybridOpNode::make(std::string name, n->attrs = std::move(attrs); n->inputs = std::move(inputs); n->outputs = std::move(outputs); - n->axis = op::GatherLoopVars(body); + n->axis = top::GatherLoopVars(body); n->body = std::move(body); Operation res = Operation(n); return res; @@ -109,7 +110,7 @@ Operation HybridOpNode::ReplaceInputs( const std::unordered_map &rmap) const { CHECK_EQ(self.operator->(), this); auto n = make_object(*this); - n->body = op::ReplaceTensor(this->body, rmap); + n->body = top::ReplaceTensor(this->body, rmap); for (size_t i = 0; i < n->inputs.size(); ++i) { Tensor t = n->inputs[i]; if (rmap.count(t)) { @@ -209,18 +210,15 @@ Stmt HybridOpNode::BuildProvide( * This is a major 
difference that HybridOpNode is NOT the same as * ExternOpNode. * */ - ret = op::ReplaceTensor(ret, rmap); - ret = op::ReplaceProvideTensor(ret, rmap); + ret = top::ReplaceTensor(ret, rmap); + ret = top::ReplaceProvideTensor(ret, rmap); - ret = op::ApplySchedule(stage, dom_map, ret); + ret = top::ApplySchedule(stage, dom_map, ret); return ret; } -namespace op { - - Stmt ApplyLoopShapes(const Stage &stage, - const std::unordered_map &dom_map, Stmt stmt) { + const std::unordered_map &dom_map, Stmt stmt) { class LoopSpliter : public StmtExprMutator { PrimExpr factor; const VarNode *parent; @@ -508,5 +506,5 @@ Stmt ReplaceProvideTensor(Stmt stmt, Stmt ret = repl(stmt); return repl.found ? ret : stmt; } -} // namespace op +} // namespace top } // namespace tvm diff --git a/src/op/hybrid_op.h b/src/op/hybrid_op.h index f180129c263c..d20d14fb6535 100644 --- a/src/op/hybrid_op.h +++ b/src/op/hybrid_op.h @@ -35,7 +35,7 @@ namespace tvm { -namespace op { +namespace top { /*! * \brief Find all the iteration variables in the given statement body. @@ -90,7 +90,7 @@ Stmt ApplyLoopOrder(const Stage &stage, const std::unordered_map &dom_map, const std::unordered_map &rebased, Stmt stmt); -} // namespace op +} // namespace top } // namespace tvm #endif // TVM_OP_HYBRID_OP_H_ diff --git a/src/op/op_util.cc b/src/op/op_util.cc index 52d3b5aa200a..12f14dc08dbb 100644 --- a/src/op/op_util.cc +++ b/src/op/op_util.cc @@ -31,7 +31,7 @@ #include "../arith/compute_expr.h" namespace tvm { -namespace op { +namespace top { using namespace arith; using namespace ir; @@ -172,7 +172,7 @@ MakeLoopNest(const Stage& stage, } } // message passing to get offset of root iter vars. - schedule::PassUpIndex(stage, dom_map, &value_map); + top::PassUpIndex(stage, dom_map, &value_map); return nest; } @@ -220,7 +220,7 @@ Stmt ReplaceTensor(Stmt stmt, return repl.found ? ret : stmt; } PrimExpr ReplaceTensor(PrimExpr expr, - const std::unordered_map& replace) { + const std::unordered_map& replace) { TensorReplacer repl(replace); PrimExpr ret = repl(expr); return repl.found ? 
ret : expr; @@ -266,5 +266,5 @@ ir::ForType IterVarTypeToForType(IterVarType iter_type) { } } -} // namespace op +} // namespace top } // namespace tvm diff --git a/src/op/op_util.h b/src/op/op_util.h index cea050b2695d..42df6da50c74 100644 --- a/src/op/op_util.h +++ b/src/op/op_util.h @@ -34,7 +34,7 @@ #include "../schedule/message_passing.h" namespace tvm { -namespace op { +namespace top { using ir::MergeNest; @@ -102,6 +102,6 @@ IterVarType ForTypeToIterVarType(ir::ForType for_type); */ ir::ForType IterVarTypeToForType(IterVarType iter_type); -} // namespace op +} // namespace top } // namespace tvm #endif // TVM_OP_OP_UTIL_H_ diff --git a/src/op/placeholder_op.cc b/src/op/placeholder_op.cc index 22e0ad4de34e..9121cfc3cfbc 100644 --- a/src/op/placeholder_op.cc +++ b/src/op/placeholder_op.cc @@ -24,6 +24,7 @@ #include namespace tvm { +namespace top { // PlaceholderOpNode TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -102,4 +103,5 @@ Stmt PlaceholderOpNode::BuildProvide( bool debug_keep_trivial_loop) const { return Stmt(); } +} // namespace top } // namespace tvm diff --git a/src/op/scan_op.cc b/src/op/scan_op.cc index f7b16f28acbf..ab16b827b7d3 100644 --- a/src/op/scan_op.cc +++ b/src/op/scan_op.cc @@ -28,7 +28,7 @@ #include "../schedule/graph.h" namespace tvm { - +namespace top { using namespace ir; TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -215,7 +215,6 @@ void ScanOpNode::GatherBound( const std::unordered_map& tensor_dom, std::unordered_map* out_dom_map) const { CHECK_EQ(self.operator->(), this); - using namespace schedule; CHECK(!out_dom_map->count(this->scan_axis)); std::vector output(this->num_outputs()); for (size_t i = 0; i < output.size(); ++i) { @@ -297,12 +296,13 @@ Stmt ScanOpNode::BuildProvide( } std::unordered_map vmap; std::unordered_set empty; - auto nest = op::MakeLoopNest( + auto nest = MakeLoopNest( stage, dom_map, 0, false, empty, &vmap, debug_keep_trivial_loop); nest[begin_scan].push_back(init); nest.push_back( - op::MakeIfNest( - schedule::MakeBoundCheck(stage, dom_map, vmap, false, empty))); + MakeIfNest( + MakeBoundCheck(stage, dom_map, vmap, false, empty))); return MergeNest(nest, provide); } +} // namespace top } // namespace tvm diff --git a/src/op/tensor_compute_op.cc b/src/op/tensor_compute_op.cc index 4c758ddd81fe..d924a1e9fe87 100644 --- a/src/op/tensor_compute_op.cc +++ b/src/op/tensor_compute_op.cc @@ -31,6 +31,7 @@ #include "../arith/compute_expr.h" namespace tvm { +namespace top { using namespace ir; // TensorComputeOpNode TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) @@ -81,12 +82,12 @@ Operation TensorComputeOpNode::ReplaceInputs( CHECK_EQ(self.operator->(), this); auto n = make_object(*this); auto intrin = make_object(*(this->intrin.operator->())); - intrin->body = op::ReplaceTensor(this->intrin->body, rmap); + intrin->body = ReplaceTensor(this->intrin->body, rmap); if (intrin->reduce_init.defined()) { - intrin->reduce_init = op::ReplaceTensor(this->intrin->reduce_init, rmap); + intrin->reduce_init = ReplaceTensor(this->intrin->reduce_init, rmap); } if (intrin->reduce_update.defined()) { - intrin->reduce_update = op::ReplaceTensor(this->intrin->reduce_update, rmap); + intrin->reduce_update = ReplaceTensor(this->intrin->reduce_update, rmap); } for (size_t i = 0; i < n->inputs.size(); ++i) { Tensor t = n->inputs[i]; @@ -208,7 +209,7 @@ Stmt TensorComputeOpNode::BuildProvide( if (this->reduce_axis.size() == 0) { std::vector > nest( n.main_nest.begin(), n.main_nest.begin() + tloc + 1); - nest.emplace_back(op::MakeIfNest(n.main_predicates)); + 
nest.emplace_back(MakeIfNest(n.main_predicates)); CHECK_EQ(n.init_predicates.size(), 0U); CHECK(this->intrin->body.defined()) << "Normal store op for intrin " << this << " is not defined"; @@ -216,7 +217,7 @@ Stmt TensorComputeOpNode::BuildProvide( body = MergeNest(input_bind_nest, body); body = ir::Substitute(body, vmap); body = MergeNest(binder.asserts(), body); - body = op::Substitute(body, n.main_vmap); + body = top::Substitute(body, n.main_vmap); Stmt ret = MergeNest(nest, body); return ret; } else { @@ -229,22 +230,22 @@ Stmt TensorComputeOpNode::BuildProvide( n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1); std::vector > update_nest( n.main_nest.begin() + n.num_common_loop + 1, n.main_nest.begin() + tloc + 1); - update_nest.emplace_back(op::MakeIfNest(n.main_predicates)); + update_nest.emplace_back(MakeIfNest(n.main_predicates)); if (this->intrin->reduce_init.defined()) { // init nest std::vector > init_nest( n.init_nest.begin(), n.init_nest.begin() + tloc + 1); - init_nest.emplace_back(op::MakeIfNest(n.init_predicates)); + init_nest.emplace_back(MakeIfNest(n.init_predicates)); Stmt init = MergeNest(output_bind_nest, this->intrin->reduce_init); - init = op::Substitute(init, n.init_vmap); + init = top::Substitute(init, n.init_vmap); init = MergeNest(init_nest, init); // The update Stmt update = MergeNest(output_bind_nest, this->intrin->reduce_update); update = MergeNest(input_bind_nest, update); update = ir::Substitute(update, vmap); update = MergeNest(binder.asserts(), update); - update = op::Substitute(update, n.main_vmap); + update = top::Substitute(update, n.main_vmap); update = MergeNest(update_nest, update); return MergeNest(common, SeqStmt::Flatten(init, update)); } else { @@ -258,11 +259,11 @@ Stmt TensorComputeOpNode::BuildProvide( update = MergeNest(input_bind_nest, update); update = ir::Substitute(update, vmap); update = MergeNest(binder.asserts(), update); - update = op::Substitute(update, n.main_vmap); + update = top::Substitute(update, n.main_vmap); update = MergeNest(update_nest, update); return MergeNest(common, update); } } } - +} // namespace top } // namespace tvm diff --git a/src/op/tensorize.cc b/src/op/tensorize.cc index 4460d905a54d..413bb424b720 100644 --- a/src/op/tensorize.cc +++ b/src/op/tensorize.cc @@ -32,9 +32,9 @@ #include "../schedule/message_passing.h" namespace tvm { +namespace top { using namespace ir; -using namespace op; // Detect the region of input and output to be tensrized. // out_dom: the domain of root iter vars in output op @@ -82,7 +82,7 @@ size_t InferTensorizeRegion( } CHECK(found_point); // Get domain of the tensorized scope. 
- schedule::PassUpDomain(stage, dom_map, &up_state); + top::PassUpDomain(stage, dom_map, &up_state); // Get domains if inputs std::unordered_map in_dom; std::unordered_map temp_dmap; @@ -445,15 +445,15 @@ Stmt MakeTensorize(const ComputeOpNode* self, // Do no need to split reduction std::vector > nest( n.main_nest.begin(), n.main_nest.begin() + tloc + 1); - nest.emplace_back(op::MakeIfNest(n.main_predicates)); + nest.emplace_back(MakeIfNest(n.main_predicates)); CHECK_EQ(n.init_predicates.size(), 0U); CHECK(intrin->body.defined()) << "Normal store op for intrin " << intrin << " is not defined"; Stmt body = MergeNest(output_bind_nest, intrin->body); body = MergeNest(input_bind_nest, body); - body = Substitute(body, vmap); + body = ir::Substitute(body, vmap); body = MergeNest(binder.asserts(), body); - body = Substitute(body, n.main_vmap); + body = top::Substitute(body, n.main_vmap); return MergeNest(nest, body); } else { // Need to split reduction @@ -465,22 +465,22 @@ Stmt MakeTensorize(const ComputeOpNode* self, n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1); std::vector > update_nest( n.main_nest.begin() + n.num_common_loop + 1, n.main_nest.begin() + tloc + 1); - update_nest.emplace_back(op::MakeIfNest(n.main_predicates)); + update_nest.emplace_back(MakeIfNest(n.main_predicates)); if (intrin->reduce_init.defined()) { // init nest std::vector > init_nest( n.init_nest.begin(), n.init_nest.begin() + tloc + 1); - init_nest.emplace_back(op::MakeIfNest(n.init_predicates)); + init_nest.emplace_back(MakeIfNest(n.init_predicates)); Stmt init = MergeNest(output_bind_nest, intrin->reduce_init); - init = Substitute(init, n.init_vmap); + init = top::Substitute(init, n.init_vmap); init = MergeNest(init_nest, init); // The update Stmt update = MergeNest(output_bind_nest, intrin->reduce_update); update = MergeNest(input_bind_nest, update); - update = Substitute(update, vmap); + update = ir::Substitute(update, vmap); update = MergeNest(binder.asserts(), update); - update = Substitute(update, n.main_vmap); + update = top::Substitute(update, n.main_vmap); update = MergeNest(update_nest, update); return MergeNest(common, SeqStmt::Flatten(init, update)); } else { @@ -492,9 +492,9 @@ Stmt MakeTensorize(const ComputeOpNode* self, intrin->reduce_update); update = MergeNest(output_bind_nest, update); update = MergeNest(input_bind_nest, update); - update = Substitute(update, vmap); + update = ir::Substitute(update, vmap); update = MergeNest(binder.asserts(), update); - update = Substitute(update, n.main_vmap); + update = top::Substitute(update, n.main_vmap); update = MergeNest(update_nest, update); return MergeNest(common, update); } @@ -533,4 +533,5 @@ TVM_REGISTER_GLOBAL("test.op.MatchTensorizeBody") intrin, &vrange); }); +} // namespace top } // namespace tvm diff --git a/src/pass/inject_prefetch.cc b/src/pass/inject_prefetch.cc index d87786385a02..a2895d55a70d 100644 --- a/src/pass/inject_prefetch.cc +++ b/src/pass/inject_prefetch.cc @@ -39,7 +39,7 @@ class PrefetchInjector : public StmtMutator { Stmt ret = StmtMutator::VisitStmt_(op); op = ret.as(); if (op && op->attr_key == attr::prefetch_scope) { - Tensor ts = Downcast(op->node); + top::Tensor ts = Downcast(op->node); CHECK_NE(loop_nest_.size(), 0U); Domain domain = DomainTouched(op->body, ts, true, false); Region region; diff --git a/src/pass/storage_flatten.cc b/src/pass/storage_flatten.cc index fc46ef3a4523..81223351a99c 100644 --- a/src/pass/storage_flatten.cc +++ b/src/pass/storage_flatten.cc @@ -49,7 +49,7 @@ using 
intrinsic::tvm_address_of; class StorageFlattener : public StmtExprMutator { public: - explicit StorageFlattener(Map extern_buffer, + explicit StorageFlattener(Map extern_buffer, int cache_line_size, bool create_bound_attributes, IRVisitorWithAnalyzer* bounded_analyzer) : bounded_analyzer_(bounded_analyzer), @@ -82,8 +82,8 @@ class StorageFlattener : public StmtExprMutator { storage_scope_[op->node.get()] = op->value.as()->value; return this->VisitStmt(op->body); } else if (op->attr_key == attr::double_buffer_scope && - op->node->IsInstance()) { - Operation func = Downcast(op->node); + op->node->IsInstance()) { + auto func = Downcast(op->node); Stmt body = this->VisitStmt(op->body); for (int i = 0; i < func->num_outputs(); ++i) { TensorKey key{func, i}; @@ -104,7 +104,7 @@ class StorageFlattener : public StmtExprMutator { } else if (op->attr_key == attr::buffer_bind_scope) { return HandleBufferBindScope(op); } else if (op->attr_key == attr::buffer_dim_align) { - Tensor tensor = Downcast(op->node); + auto tensor = Downcast(op->node); const CallNode* tuple = op->value.as(); CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple)); TensorKey key{tensor->op, tensor->value_index}; @@ -406,7 +406,7 @@ class StorageFlattener : public StmtExprMutator { Array arr = Downcast > (op->node); CHECK_EQ(arr.size(), 2U); const BufferNode* buffer = arr[0].as(); - const TensorNode* tensor = arr[1].as(); + const top::TensorNode* tensor = arr[1].as(); const CallNode* tuple = op->value.as(); CHECK(buffer && tensor); CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple)); @@ -529,7 +529,7 @@ class StorageFlattener : public StmtExprMutator { bool create_bound_attributes_{false}; }; -Stmt StorageFlatten(Stmt stmt, Map extern_buffer, +Stmt StorageFlatten(Stmt stmt, Map extern_buffer, int cache_line_size, bool create_bound_attributes) { IRVisitorWithAnalyzer bounded_analyzer; bounded_analyzer(stmt); diff --git a/src/pass/tensor_core.cc b/src/pass/tensor_core.cc index c5c81cacb688..5e92f909b78b 100644 --- a/src/pass/tensor_core.cc +++ b/src/pass/tensor_core.cc @@ -39,6 +39,7 @@ namespace tvm { namespace ir { +using namespace top; using runtime::StorageRank; using runtime::StorageScope; using runtime::ThreadScope; @@ -417,7 +418,7 @@ class BufferAnalyser : public StmtExprVisitor { storage_scope_[op->node.get()] = op->value.as()->value; this->VisitStmt(op->body); } else if (op->attr_key == attr::buffer_dim_align) { - Tensor tensor = Downcast(op->node); + top::Tensor tensor = Downcast(op->node); const CallNode* tuple = op->value.as(); CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple)); auto& vinfo = dim_align_[TensorKey{tensor->op, tensor->value_index}]; @@ -831,7 +832,7 @@ class TensorCoreIRMutator : public StmtExprMutator { Stmt VisitStmt_(const AttrStmtNode* op) final { Stmt stmt = StmtExprMutator::VisitStmt_(op); if (op->attr_key == attr::realize_scope) { - auto node = op->node.as(); + auto node = op->node.as(); if (node != nullptr) { if (!frag_reg_.count(node->name)) { return stmt; @@ -1119,9 +1120,9 @@ class TensorCoreIRMutator : public StmtExprMutator { buffer_node->offset_factor = 1; Buffer buffer(buffer_node); - ObjectPtr tensor_node = make_object(); + ObjectPtr tensor_node = make_object(); tensor_node->value_index = key.value_index; - tensor_node->op = Downcast(key.f); + tensor_node->op = Downcast(key.f); tensor_node->shape = shape; tensor_node->dtype = datatype; Tensor tensor(tensor_node); diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index 
5ee4ce30c96d..3089281bac88 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -101,20 +101,20 @@ Array GetShape(const Array& shape) { // The getter to get schedule from compile engine. // Get schedule from functor. class ScheduleGetter : - public ExprFunctor(const Expr&)> { + public ExprFunctor(const Expr&)> { public: explicit ScheduleGetter(Target target) : target_(target), device_copy_op_(Op::Get("device_copy")) {} - std::pair Create(const Function& prim_func) { + std::pair Create(const Function& prim_func) { static auto fschedule = Op::GetAttr("FTVMSchedule"); auto cache_node = make_object(); cache_node->target = target_; for (Var param : prim_func->params) { - Array inputs; + Array inputs; if (const auto* ttype = param->checked_type().as()) { - tvm::Tensor tensor = tvm::placeholder( + tvm::top::Tensor tensor = tvm::top::placeholder( GetShape(ttype->shape), ttype->dtype); cache_node->inputs.push_back(tensor); inputs.push_back(tensor); @@ -125,7 +125,7 @@ class ScheduleGetter : const auto* ttype = field.as(); // TODO(@icemelon): Allow recursive tuple CHECK(ttype != nullptr); - tvm::Tensor tensor = tvm::placeholder( + tvm::top::Tensor tensor = tvm::top::placeholder( GetShape(ttype->shape), ttype->dtype); cache_node->inputs.push_back(tensor); inputs.push_back(tensor); @@ -150,13 +150,13 @@ class ScheduleGetter : // Fusion over tupled results may leave identity relationships // between inputs and outputs, and those should not be scheduled. // Hence schedule only non PlaceholderOp outputs. - tvm::Array tensor_outs; + tvm::Array tensor_outs; for (const auto& tensor : cache_node->outputs) { - if (!tensor->op.as()) { + if (!tensor->op.as()) { tensor_outs.push_back(tensor); } } - Schedule schedule; + top::Schedule schedule; // No need to register schedule for device copy op. 
if (master_attrs_.as() == nullptr) { schedule = @@ -170,27 +170,27 @@ class ScheduleGetter : return std::make_pair(schedule, cfunc); } - Array VisitExpr(const Expr& expr) { + Array VisitExpr(const Expr& expr) { auto it = memo_.find(expr); if (it != memo_.end()) { return it->second; } else { - Array res = ExprFunctor::VisitExpr(expr); + Array res = ExprFunctor::VisitExpr(expr); memo_[expr] = res; return res; } } - Array VisitExpr_(const VarNode* op) final { + Array VisitExpr_(const VarNode* op) final { LOG(FATAL) << "Free variable " << op->name_hint(); return {}; } - Array VisitExpr_(const ConstantNode* op) final { + Array VisitExpr_(const ConstantNode* op) final { CHECK(op->is_scalar()); void* data = op->data->data; DataType dtype = DataType(op->data->dtype); - Tensor value = tvm::compute({}, [&](const Array&) { + auto value = top::compute({}, [&](const Array&) { if (dtype == DataType::Int(32)) { return make_const(dtype, static_cast(data)[0]); } else if (dtype == DataType::Int(64)) { @@ -210,19 +210,19 @@ class ScheduleGetter : return {value}; } - Array VisitExpr_(const CallNode* call_node) final { + Array VisitExpr_(const CallNode* call_node) final { static auto fcompute = Op::GetAttr("FTVMCompute"); static auto fpattern = Op::GetAttr("TOpPattern"); - Array inputs; + Array inputs; int count_tuple = 0; for (Expr arg : call_node->args) { if (arg->checked_type().as()) { ++count_tuple; } - for (Tensor tensor : VisitExpr(arg)) { + for (top::Tensor tensor : VisitExpr(arg)) { inputs.push_back(tensor); } } @@ -252,12 +252,12 @@ class ScheduleGetter : CHECK(call_node->op.as()) << "Primitive function only allows call into primitive ops"; Op op = Downcast(call_node->op); - Array outputs; + Array outputs; // Skip fcompute for device copy operators as it is not registered. 
if (op == device_copy_op_) { const auto* copy_input = inputs[0].operator->(); - outputs.push_back(TensorNode::make(copy_input->shape, copy_input->dtype, - Operation(), 0)); + outputs.push_back(top::TensorNode::make(copy_input->shape, copy_input->dtype, + top::Operation(), 0)); } else { outputs = fcompute[op](call_node->attrs, inputs, call_node_type, target_); @@ -291,33 +291,33 @@ class ScheduleGetter : return outputs; } - Array VisitExpr_(const FunctionNode* op) final { + Array VisitExpr_(const FunctionNode* op) final { LOG(FATAL) << "Do not support sub function"; - return Array(); + return Array(); } - Array VisitExpr_(const LetNode* op) final { - Array val = VisitExpr(op->value); + Array VisitExpr_(const LetNode* op) final { + Array val = VisitExpr(op->value); CHECK(!memo_.count(op->var)); memo_[op->var] = val; return VisitExpr(op->body); } - Array VisitExpr_(const TupleNode* op) final { - Array fields; + Array VisitExpr_(const TupleNode* op) final { + Array fields; for (Expr field : op->fields) { CHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; - Array res = VisitExpr(field); + Array res = VisitExpr(field); CHECK_EQ(res.size(), 1); fields.push_back(res[0]); } return fields; } - Array VisitExpr_(const TupleGetItemNode* op) final { + Array VisitExpr_(const TupleGetItemNode* op) final { const auto* tuple_type = op->tuple->type_as(); - Array tuple = VisitExpr(op->tuple); + Array tuple = VisitExpr(op->tuple); CHECK_EQ(tuple_type->fields.size(), tuple.size()); CHECK_GE(op->index, 0); CHECK_LT(static_cast(op->index), tuple.size()); @@ -330,28 +330,28 @@ class ScheduleGetter : Attrs master_attrs_; int master_op_pattern_{0}; std::ostringstream readable_name_stream_; - std::unordered_map, ObjectHash, ObjectEqual> memo_; - Array scalars_; + std::unordered_map, ObjectHash, ObjectEqual> memo_; + Array scalars_; // Cache device copy op for equivalence checking to reduce registry lookup // overhead for each invocation of call node when retrieving schedules. const Op& device_copy_op_; }; // Creates shape function from functor. 
-class MakeShapeFunc : public ExprFunctor(const Expr&)> { +class MakeShapeFunc : public ExprFunctor(const Expr&)> { public: MakeShapeFunc() {} - std::pair Create(const Function& prim_func) { + std::pair Create(const Function& prim_func) { for (auto param : prim_func->params) { param_states_[param] = kNoNeed; - Array data_inputs; - Array shape_inputs; + Array data_inputs; + Array shape_inputs; auto add_placeholder = [&data_inputs, &shape_inputs](const TensorTypeNode* ttype) { // Add data placeholder Shape shape = GetShape(ttype->shape); - tvm::Tensor data_tensor = tvm::placeholder(shape, ttype->dtype); + tvm::top::Tensor data_tensor = tvm::top::placeholder(shape, ttype->dtype); data_inputs.push_back(data_tensor); // Add shape placeholder int64_t ndim = shape.size(); @@ -359,7 +359,7 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { if (ndim > 0) { sshape.push_back(tvm::Integer(ndim)); } - tvm::Tensor shape_tensor = tvm::placeholder(sshape, DataType::Int(64)); + tvm::top::Tensor shape_tensor = tvm::top::placeholder(sshape, DataType::Int(64)); shape_inputs.push_back(shape_tensor); }; @@ -410,12 +410,12 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { CachedFunc cfunc(cache_node); // generate schedule for shape func - Array out_ops; + Array out_ops; for (auto t : cache_node->outputs) { out_ops.push_back(t->op); } - auto schedule = create_schedule(out_ops); - tvm::schedule::AutoInlineInjective(schedule); + auto schedule = top::create_schedule(out_ops); + tvm::top::AutoInlineInjective(schedule); for (const auto& scalar : scalars_) { auto scalar_op = scalar->op; if (schedule->Contain(scalar_op)) { @@ -425,12 +425,12 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { return std::make_pair(schedule, cfunc); } - Array VisitExpr(const Expr& expr) { + Array VisitExpr(const Expr& expr) { auto it = memo_.find(expr); if (it != memo_.end()) { return it->second; } else { - Array res = ExprFunctor::VisitExpr(expr); + Array res = ExprFunctor::VisitExpr(expr); if (expr.as() == nullptr) { // Do not memoize vars because shape functions could use either the data // or the shape of a var each time. 
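
The shape-function path above collects the output operations, creates a schedule, and auto-inlines injective ops, all through the top namespace now. A hedged sketch of that pattern, with a hypothetical helper name:

// Hedged sketch mirroring how MakeShapeFunc schedules its outputs.
#include <tvm/operation.h>
#include <tvm/schedule_pass.h>

tvm::top::Schedule ScheduleOutputs(const tvm::Array<tvm::top::Tensor>& outputs) {
  tvm::Array<tvm::top::Operation> out_ops;
  for (tvm::top::Tensor t : outputs) {
    out_ops.push_back(t->op);
  }
  tvm::top::Schedule sch = tvm::top::create_schedule(out_ops);
  // AutoInlineInjective also moved from tvm::schedule to tvm::top.
  tvm::top::AutoInlineInjective(sch);
  return sch;
}
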
@@ -440,7 +440,7 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { } } - Array VisitExpr_(const VarNode* var_node) final { + Array VisitExpr_(const VarNode* var_node) final { auto var = GetRef(var_node); auto it = param_states_.find(var); if (it == param_states_.end()) { @@ -459,14 +459,14 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { } } - Array VisitExpr_(const ConstantNode* op) final { + Array VisitExpr_(const ConstantNode* op) final { CHECK(data_dependants_.size()); CHECK(op->is_scalar()); bool data_dependant = data_dependants_.back(); if (data_dependant) { void* data = op->data->data; DataType dtype = DataType(op->data->dtype); - Tensor value = tvm::compute({}, [&](const Array&) { + auto value = tvm::top::compute({}, [&](const Array&) { if (dtype == DataType::Int(32)) { return make_const(dtype, static_cast(data)[0]); } else if (dtype == DataType::Int(64)) { @@ -485,7 +485,7 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { scalars_.push_back(value); return {value}; } else { - Tensor value = tvm::compute({}, [&](const Array&) { + auto value = tvm::top::compute({}, [&](const Array&) { return make_const(DataType::Int(64), 0); }, "shape_const", topi::kBroadcast); scalars_.push_back(value); @@ -493,7 +493,7 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { } } - Array VisitExpr_(const CallNode* call_node) final { + Array VisitExpr_(const CallNode* call_node) final { static auto fshape_func = Op::GetAttr("FShapeFunc"); static auto tshape_data_dependant = Op::GetAttr( "TShapeDataDependant"); @@ -510,13 +510,13 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { data_dependants_.push_back(tshape_data_dependant[op]); // Visit all inputs - Array inputs; + Array inputs; int count_tuple = 0; for (Expr arg : call_node->args) { if (arg->checked_type().as()) { ++count_tuple; } - for (Tensor tensor : VisitExpr(arg)) { + for (top::Tensor tensor : VisitExpr(arg)) { inputs.push_back(tensor); } } @@ -546,24 +546,24 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { return outputs; } - Array VisitExpr_(const FunctionNode* op) final { + Array VisitExpr_(const FunctionNode* op) final { LOG(FATAL) << "Do not support sub function"; - return Array(); + return Array(); } - Array VisitExpr_(const LetNode* op) final { - Array val = VisitExpr(op->value); + Array VisitExpr_(const LetNode* op) final { + Array val = VisitExpr(op->value); CHECK(!memo_.count(op->var)); memo_[op->var] = val; return VisitExpr(op->body); } - Array VisitExpr_(const TupleNode* op) final { - Array fields; + Array VisitExpr_(const TupleNode* op) final { + Array fields; for (Expr field : op->fields) { CHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; - Array res = VisitExpr(field); + Array res = VisitExpr(field); CHECK_EQ(res.size(), 1); fields.push_back(res[0]); } @@ -576,15 +576,15 @@ class MakeShapeFunc : public ExprFunctor(const Expr&)> { /*! \brief Map from parameter to its shape function usage state */ std::unordered_map param_states_; /*! \brief Map from parameter to list of data placeholder */ - std::unordered_map, ObjectHash, ObjectEqual> param_data_; + std::unordered_map, ObjectHash, ObjectEqual> param_data_; /*! \brief Map from parameter to list of shape placeholder */ - std::unordered_map, ObjectHash, ObjectEqual> param_shapes_; + std::unordered_map, ObjectHash, ObjectEqual> param_shapes_; /*! \brief Memoized visit result */ - std::unordered_map, ObjectHash, ObjectEqual> memo_; + std::unordered_map, ObjectHash, ObjectEqual> memo_; /*! 
\brief Stack of data dependencies for shape function */ std::vector data_dependants_; /*! \brief Scalars used in the shape function */ - Array scalars_; + Array scalars_; }; class CompileEngineImpl : public CompileEngineNode { @@ -672,7 +672,7 @@ class CompileEngineImpl : public CompileEngineNode { * \return Pair of schedule and cache. * The funcs field in cache is not yet populated. */ - std::pair CreateSchedule( + std::pair CreateSchedule( const Function& source_func, const Target& target) { return ScheduleGetter(target).Create(source_func); } @@ -723,8 +723,8 @@ class CompileEngineImpl : public CompileEngineNode { cache_node->func_name = GetUniqueName(cache_node->func_name); // NOTE: array will copy on write. - Array all_args = cache_node->inputs; - for (Tensor arg : cache_node->outputs) { + Array all_args = cache_node->inputs; + for (top::Tensor arg : cache_node->outputs) { all_args.push_back(arg); } // lower the function @@ -733,7 +733,7 @@ class CompileEngineImpl : public CompileEngineNode { spair.first, all_args, cache_node->func_name, key->source_func); } else { tvm::BuildConfig bcfg = BuildConfig::Create(); - std::unordered_map binds; + std::unordered_map binds; cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg); } value->cached_func = CachedFunc(cache_node); @@ -763,12 +763,12 @@ class CompileEngineImpl : public CompileEngineNode { cache_node->func_name = GetUniqueName(cache_node->func_name); cache_node->target = key->target; - Array all_args = cache_node->inputs; - for (Tensor arg : cache_node->outputs) { + Array all_args = cache_node->inputs; + for (top::Tensor arg : cache_node->outputs) { all_args.push_back(arg); } tvm::BuildConfig bcfg = BuildConfig::Create(); - std::unordered_map binds; + std::unordered_map binds; cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg); value->cached_func = CachedFunc(cache_node); return value; diff --git a/src/relay/backend/compile_engine.h b/src/relay/backend/compile_engine.h index f6c38ba6b9a9..386eba7f9fd8 100644 --- a/src/relay/backend/compile_engine.h +++ b/src/relay/backend/compile_engine.h @@ -51,9 +51,9 @@ struct CachedFuncNode : public Object { /*! \brief Function name */ std::string func_name; /* \brief The inputs to the function */ - tvm::Array inputs; + tvm::Array inputs; /* \brief The outputs to the function */ - tvm::Array outputs; + tvm::Array outputs; /*! \brief The lowered functions to support the function. */ tvm::Array funcs; /*! \brief Parameter usage states in the shape function. 
*/ diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc index 3d03f884e247..2aefbd7ea883 100644 --- a/src/relay/op/annotation/annotation.cc +++ b/src/relay/op/annotation/annotation.cc @@ -78,8 +78,8 @@ TVM_ADD_FILELINE) .set_attr("TOpIsStateful", false) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -104,8 +104,8 @@ TVM_ADD_FILELINE) .set_attr("TOpIsStateful", false) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -122,8 +122,8 @@ Mark the start of bitpacking. .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -139,8 +139,8 @@ Mark the end of bitpacking. .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -162,9 +162,9 @@ Mark a checkpoint for checkpointing memory optimization. .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { - Array outputs; + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { + Array outputs; for (size_t i = 0; i < inputs.size(); ++i) { outputs.push_back(topi::identity(inputs[i])); } @@ -183,8 +183,8 @@ Beginning of a region that is handled by a given compiler. .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -208,8 +208,8 @@ End of a region that is handled by a given compiler. 
.set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); diff --git a/src/relay/op/debug.cc b/src/relay/op/debug.cc index cdfdac06dbc5..87e579740892 100644 --- a/src/relay/op/debug.cc +++ b/src/relay/op/debug.cc @@ -35,11 +35,11 @@ namespace relay { TVM_REGISTER_NODE_TYPE(DebugAttrs); -Array DebugCompute(const Attrs& attrs, - const Array& inputs, +Array DebugCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { - return Array{ topi::identity(inputs[0]) }; + return Array{ topi::identity(inputs[0]) }; } RELAY_REGISTER_OP("debug") diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index bd3b543659ae..6c4b3ea87b0e 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -82,8 +82,8 @@ RELAY_REGISTER_OP("memory.alloc_storage") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -178,8 +178,8 @@ RELAY_REGISTER_OP("memory.alloc_tensor") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -227,8 +227,8 @@ RELAY_REGISTER_OP("memory.invoke_tvm_op") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -251,8 +251,8 @@ RELAY_REGISTER_OP("memory.kill") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); @@ -339,8 +339,8 @@ RELAY_REGISTER_OP("memory.shape_func") .set_attr("TNonComputational", true) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { return {topi::identity(inputs[0])}; }); diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc index f1d711146a99..9fca22d777d9 100644 --- a/src/relay/op/nn/nn.cc +++ b/src/relay/op/nn/nn.cc @@ -93,10 +93,10 @@ RELAY_REGISTER_OP("nn.bias_add") .add_argument("bias", "1D Tensor", "Bias.") .set_support_level(1) .add_type_rel("BiasAdd", BiasAddRel) -.set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, 
+.set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); - return tvm::Array{topi::nn::bias_add(inputs[0], inputs[1], param->axis)}; + return tvm::Array{topi::nn::bias_add(inputs[0], inputs[1], param->axis)}; }); @@ -233,11 +233,11 @@ RELAY_REGISTER_OP("nn.leaky_relu") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr( "FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); - return Array{ topi::leaky_relu(inputs[0], param->alpha) }; + return Array{ topi::leaky_relu(inputs[0], param->alpha) }; }); @@ -314,11 +314,11 @@ where :math:`*` is an channelwise multiplication for each sample in the batch. .set_attr("FInferCorrectLayout", PReluInferCorrectLayout) .set_attr( "FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); - return Array{ topi::prelu(inputs[0], inputs[1], param->axis)}; + return Array{ topi::prelu(inputs[0], inputs[1], param->axis)}; }); @@ -350,12 +350,12 @@ RELAY_REGISTER_OP("nn.softmax") .set_support_level(1) .add_type_rel("Identity", IdentityRel) .set_attr("FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{ topi::nn::softmax(inputs[0], param->axis) }; + return Array{ topi::nn::softmax(inputs[0], param->axis) }; }); @@ -384,14 +384,14 @@ RELAY_REGISTER_OP("nn.log_softmax") .set_support_level(1) .add_type_rel("Identity", IdentityRel) .set_attr("FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); CHECK(param->axis == -1 || param->axis == static_cast(inputs[0].ndim()) - 1) << "log_softmax currently only works on last dimension"; - return Array{ topi::nn::log_softmax(inputs[0]) }; + return Array{ topi::nn::log_softmax(inputs[0]) }; }); @@ -461,10 +461,10 @@ Example:: .add_type_rel("BatchFlatten", BatchFlattenRel) .set_attr( "FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { - return Array{ topi::nn::flatten(inputs[0]) }; + return Array{ topi::nn::flatten(inputs[0]) }; }); @@ -488,10 +488,10 @@ RELAY_REGISTER_OP("nn.relu") .add_type_rel("Identity", IdentityRel) .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) .set_attr("FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { - return Array{ topi::relu(inputs[0], 0.0f) }; + return Array{ topi::relu(inputs[0], 0.0f) }; }); diff --git a/src/relay/op/nn/pad.cc b/src/relay/op/nn/pad.cc index 72ea70f82eb8..b67f93928a88 100644 --- a/src/relay/op/nn/pad.cc +++ b/src/relay/op/nn/pad.cc @@ -160,8 +160,8 @@ bool PadRel(const Array& types, return true; } -Array PadCompute(const Attrs& attrs, - const Array& inputs, +Array PadCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); @@ -180,7 +180,7 @@ Array PadCompute(const Attrs& attrs, pad_after.push_back(pad_width[i][1]); } const auto* out_ttype = out_type.as(); - return Array{ topi::pad(inputs[0], pad_before, pad_after, + return Array{ topi::pad(inputs[0], 
pad_before, pad_after, tvm::make_const(out_ttype->dtype, param->pad_value), "T_pad", topi::kElementWise, diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc index 660522590bbf..65fd09d93d4d 100644 --- a/src/relay/op/nn/pooling.cc +++ b/src/relay/op/nn/pooling.cc @@ -166,8 +166,8 @@ bool Pool2DRel(const Array& types, } template -Array Pool2DCompute(const Attrs& attrs, - const Array& inputs, +Array Pool2DCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCHW("NCHW"); @@ -203,11 +203,11 @@ Array Pool2DCompute(const Attrs& attrs, } if (mode == topi::nn::kAvgPool) { bool count_include_pad = reinterpret_cast(param)->count_include_pad; - return Array{ + return Array{ topi::nn::pool(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name(), count_include_pad)}; } else { - return Array{ + return Array{ topi::nn::pool(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name())}; } @@ -333,8 +333,8 @@ bool GlobalPool2DRel(const Array& types, template -Array GlobalPool2DCompute(const Attrs& attrs, - const Array& inputs, +Array GlobalPool2DCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCHW("NCHW"); @@ -351,7 +351,7 @@ Array GlobalPool2DCompute(const Attrs& attrs, CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) << "Pool2D only support 4-D input (e.g., NCHW)" << " or 5-D input (last dimension is a split of channel)"; - return Array{ + return Array{ topi::nn::global_pool(inputs[0], mode, layout.name()) }; } @@ -467,8 +467,8 @@ bool AdaptivePool2DRel(const Array& types, } template -Array AdaptivePool2DCompute(const Attrs& attrs, - const Array& inputs, +Array AdaptivePool2DCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCHW("NCHW"); @@ -500,7 +500,7 @@ Array AdaptivePool2DCompute(const Attrs& attrs, output_height = output_size[0]; output_width = output_size[1]; } - return Array{ + return Array{ topi::nn::adaptive_pool(inputs[0], Array{ output_height, output_width }, mode, layout.name()) }; } @@ -596,7 +596,7 @@ bool Pool2DGradRel(const Array& types, int num_inputs, const Attrs& attrs, } template -Array Pool2DGradCompute(const Attrs& attrs, const Array& inputs, +Array Pool2DGradCompute(const Attrs& attrs, const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); @@ -633,10 +633,10 @@ Array Pool2DGradCompute(const Attrs& attrs, const Array& inputs, } if (mode == topi::nn::kAvgPool) { bool count_include_pad = reinterpret_cast(param)->count_include_pad; - return Array{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding, + return Array{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding, mode, ceil_mode, layout.name(), count_include_pad)}; } else { - return Array{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding, + return Array{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding, mode, ceil_mode, layout.name())}; } } @@ -798,8 +798,8 @@ bool Pool1DRel(const Array& types, template -Array Pool1DCompute(const Attrs& attrs, - const Array& inputs, +Array Pool1DCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCW("NCW"); @@ -825,11 +825,11 @@ Array Pool1DCompute(const Attrs& attrs, if (mode == topi::nn::kAvgPool) { bool count_include_pad = 
reinterpret_cast(param)->count_include_pad; - return Array{ + return Array{ topi::nn::pool1d(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name(), count_include_pad)}; } else { - return Array{ + return Array{ topi::nn::pool1d(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name())}; } @@ -993,8 +993,8 @@ bool Pool3DRel(const Array& types, template -Array Pool3DCompute(const Attrs& attrs, - const Array& inputs, +Array Pool3DCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { static const Layout kNCDHW("NCDHW"); @@ -1033,11 +1033,11 @@ Array Pool3DCompute(const Attrs& attrs, } if (mode == topi::nn::kAvgPool) { bool count_include_pad = reinterpret_cast(param)->count_include_pad; - return Array{ + return Array{ topi::nn::pool3d(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name(), count_include_pad)}; } else { - return Array{ + return Array{ topi::nn::pool3d(inputs[0], pool_size, strides, padding, mode, ceil_mode, layout.name())}; } diff --git a/src/relay/op/tensor/binary.cc b/src/relay/op/tensor/binary.cc index f2f899657bc0..00ebddb7e79c 100644 --- a/src/relay/op/tensor/binary.cc +++ b/src/relay/op/tensor/binary.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -32,9 +32,9 @@ namespace relay { #define RELAY_BINARY_COMPUTE(FTOPI) \ [] (const Attrs& attrs, \ - const Array& inputs, \ + const Array& inputs, \ const Type& out_type, \ - const Target& target) -> Array { \ + const Target& target) -> Array { \ CHECK_EQ(inputs.size(), 2U); \ return {FTOPI(inputs[0], inputs[1])}; \ } \ diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc index dde3ef2f0420..e20b7cfc52c1 100644 --- a/src/relay/op/tensor/reduce.cc +++ b/src/relay/op/tensor/reduce.cc @@ -173,8 +173,8 @@ Array> ReduceInferCorrectLayout(const Attrs& attrs, } template -Array ReduceCompute(const Attrs& attrs, - const Array& inputs, +Array ReduceCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target, F f) { @@ -320,8 +320,8 @@ bool ReduceRel(const Array& types, .add_argument("data", "Tensor", "The input tensor.") -Array ArgMaxCompute(const Attrs& attrs, - const Array& inputs, +Array ArgMaxCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::argmax); @@ -340,8 +340,8 @@ values over a given axis. .set_attr("TOpPattern", kCommReduce); -Array ArgMinCompute(const Attrs& attrs, - const Array& inputs, +Array ArgMinCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::argmin); @@ -358,8 +358,8 @@ values over a given axis. 
.set_attr("FTVMCompute", ArgMinCompute) .set_attr("TOpPattern", kCommReduce); -Array SumCompute(const Attrs& attrs, - const Array& inputs, +Array SumCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::sum); @@ -392,8 +392,8 @@ Example:: .set_attr("TOpPattern", kCommReduce); -Array AllCompute(const Attrs& attrs, - const Array& inputs, +Array AllCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::all); @@ -429,8 +429,8 @@ Example:: .set_attr("TOpPattern", kCommReduce); -Array AnyCompute(const Attrs& attrs, - const Array& inputs, +Array AnyCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::any); @@ -466,8 +466,8 @@ Example:: .set_attr("TOpPattern", kCommReduce); -Array MaxCompute(const Attrs& attrs, - const Array& inputs, +Array MaxCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::max); @@ -484,8 +484,8 @@ RELAY_REGISTER_REDUCE_OP("max") .set_attr("TOpPattern", kCommReduce); -Array MinCompute(const Attrs& attrs, - const Array& inputs, +Array MinCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::min); @@ -503,8 +503,8 @@ RELAY_REGISTER_REDUCE_OP("min") .set_attr("TOpPattern", kCommReduce); -Array ProdCompute(const Attrs& attrs, - const Array& inputs, +Array ProdCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return ReduceCompute(attrs, inputs, out_type, target, topi::prod); @@ -533,8 +533,8 @@ Example:: .set_attr("TOpPattern", kCommReduce); -Array MeanCompute(const Attrs& attrs, - const Array& inputs, +Array MeanCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { IndexExpr count = make_const(inputs[0]->dtype, 1); @@ -598,8 +598,8 @@ bool VarianceRel(const Array& types, return true; } -Array VarianceCompute(const Attrs& attrs, - const Array& inputs, +Array VarianceCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { IndexExpr count = make_const(inputs[0]->dtype, 1); diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 59bb432e756d..b11ec72c570c 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -65,8 +65,8 @@ bool CastRel(const Array& types, return true; } -Array CastCompute(const Attrs& attrs, - const Array& inputs, +Array CastCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const CastAttrs *param = attrs.as(); @@ -125,8 +125,8 @@ bool CastLikeRel(const Array& types, } -Array CastLikeCompute(const Attrs& attrs, - const Array& inputs, +Array CastLikeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return { topi::cast(inputs[0], inputs[1]->dtype) }; @@ -156,7 +156,7 @@ RELAY_REGISTER_OP("cast_like") .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); -Array ReinterpretCompute(const Attrs& attrs, const Array& inputs, +Array ReinterpretCompute(const Attrs& attrs, const Array& inputs, const Type& out_type, const Target& target) { const CastAttrs* param = attrs.as(); 
CHECK(param != nullptr); @@ -230,8 +230,8 @@ bool ExpandDimsRel(const Array& types, return true; } -Array ExpandDimsCompute(const Attrs& attrs, - const Array& inputs, +Array ExpandDimsCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const ExpandDimsAttrs *param = attrs.as(); @@ -269,8 +269,8 @@ RELAY_REGISTER_OP("expand_dims") // relay.concatenate TVM_REGISTER_NODE_TYPE(ConcatenateAttrs); -Array ConcatenateCompute(const Attrs& attrs, - const Array& inputs, +Array ConcatenateCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const ConcatenateAttrs *param = attrs.as(); @@ -412,8 +412,8 @@ bool StackRel(const Array& types, return true; } -Array StackCompute(const Attrs& attrs, - const Array& inputs, +Array StackCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const StackAttrs *param = attrs.as(); @@ -504,13 +504,13 @@ bool TransposeRel(const Array& types, return true; } -Array TransposeCompute(const Attrs& attrs, - const Array& inputs, +Array TransposeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{ topi::transpose(inputs[0], param->axes) }; + return Array{ topi::transpose(inputs[0], param->axes) }; } Expr MakeTranspose(Expr data, @@ -687,8 +687,8 @@ bool ReshapeRel(const Array& types, return true; } -Array ReshapeCompute(const Attrs& attrs, - const Array& inputs, +Array ReshapeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* out_ttype = out_type.as(); @@ -922,16 +922,16 @@ bool TakeRel(const Array& types, return true; } -Array TakeCompute(const Attrs& attrs, - const Array& inputs, +Array TakeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); if (!param->axis.defined()) { - return Array{ topi::take(inputs[0], inputs[1], param->mode) }; + return Array{ topi::take(inputs[0], inputs[1], param->mode) }; } else { - return Array{ topi::take(inputs[0], inputs[1], param->axis, param->mode) }; + return Array{ topi::take(inputs[0], inputs[1], param->axis, param->mode) }; } } @@ -1009,8 +1009,8 @@ bool FullRel(const Array& types, return true; } -Array FullCompute(const Attrs& attrs, - const Array& inputs, +Array FullCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* out_ttype = out_type.as(); @@ -1117,8 +1117,8 @@ bool FullLikeRel(const Array& types, return true; } -Array FullLikeCompute(const Attrs& attrs, - const Array& inputs, +Array FullLikeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return { topi::full_like(inputs[0], inputs[1]()) }; @@ -1217,26 +1217,26 @@ bool ArangeRel(const Array& types, } } -inline Tensor DynamicArange(const tvm::Tensor& start, - const tvm::Tensor& stop, - const tvm::Tensor& step, - tvm::DataType dtype, - std::string name = "tensor", - std::string tag = topi::kInjective) { +inline top::Tensor DynamicArange(const top::Tensor& start, + const top::Tensor& stop, + const top::Tensor& step, + tvm::DataType dtype, + std::string name = "tensor", + std::string tag = topi::kInjective) { tvm::PrimExpr num_elem = tvm::Var("num_elem"); - return tvm::compute({num_elem}, [&](const Array& indices) { + return top::compute({num_elem}, [&](const Array& indices) { 
return tvm::cast(dtype, start[0] + step[0] * indices[0]); }, name, tag); } -Array ArangeCompute(const Attrs& attrs, - const Array& inputs, - const Type& out_type, - const Target& target) { +Array ArangeCompute(const Attrs& attrs, + const Array& inputs, + const Type& out_type, + const Target& target) { const ArangeAttrs* param = attrs.as(); - Tensor start = inputs[0]; - Tensor stop = inputs[1]; - Tensor step = inputs[2]; + top::Tensor start = inputs[0]; + top::Tensor stop = inputs[1]; + top::Tensor step = inputs[2]; Array empty = {0}; return { DynamicArange(start, stop, step, param->dtype) }; } @@ -1324,8 +1324,8 @@ bool RepeatRel(const Array& types, return true; } -Array RepeatCompute(const Attrs& attrs, - const Array& inputs, +Array RepeatCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const RepeatAttrs *param = attrs.as(); @@ -1435,8 +1435,8 @@ bool TileRel(const Array& types, return true; } -Array TileCompute(const Attrs& attrs, - const Array& inputs, +Array TileCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const TileAttrs *param = attrs.as(); @@ -1496,8 +1496,8 @@ bool ReverseRel(const Array& types, return true; } -Array ReverseCompute(const Attrs& attrs, - const Array& inputs, +Array ReverseCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const ReverseAttrs *param = attrs.as(); @@ -1570,8 +1570,8 @@ Expr MakeWhere(const Expr& condition, const Expr& x, const Expr& y) { return CallNode::make(op, {condition, x, y}); } -Array WhereCompute(const Attrs& attrs, - const Array& inputs, +Array WhereCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return { topi::where(inputs[0], inputs[1], inputs[2]) }; @@ -1687,8 +1687,8 @@ bool SqueezeRel(const Array& types, return true; } -Array SqueezeCompute(const Attrs& attrs, - const Array& inputs, +Array SqueezeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const SqueezeAttrs *param = attrs.as(); @@ -1728,8 +1728,8 @@ Expr MakeCollapseSumLike(Expr data, return CallNode::make(op, {data, collapse_type}, Attrs(), {}); } -Array CollapseSumLikeCompute(const Attrs& attrs, - const Array& inputs, +Array CollapseSumLikeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* out_ttype = out_type.as(); @@ -1773,8 +1773,8 @@ Expr MakeBroadCastTo(Expr data, Array shape) { return CallNode::make(op, {data}, Attrs(attrs), {}); } -Array BroadCastToCompute(const Attrs& attrs, - const Array& inputs, +Array BroadCastToCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { auto ioattrs = attrs.as(); @@ -1811,8 +1811,8 @@ Expr MakeBroadCastToLike(Expr data, return CallNode::make(op, {data, broadcast_type}, Attrs(), {}); } -Array BroadCastToLikeCompute(const Attrs& attrs, - const Array& inputs, +Array BroadCastToLikeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* out_ttype = out_type.as(); @@ -2018,13 +2018,13 @@ Expr MakeStridedSlice(Expr data, return CallNode::make(op, {data}, Attrs(attrs), {}); } -Array StridedSliceCompute(const Attrs& attrs, - const Array& inputs, +Array StridedSliceCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const StridedSliceAttrs *param = attrs.as(); CHECK(param != nullptr); - return Array{ + return Array{ 
topi::strided_slice(inputs[0], param->begin, param->end, param->strides) }; } @@ -2175,8 +2175,8 @@ bool SplitRel(const Array& types, return true; } -Array SplitCompute(const Attrs& attrs, - const Array& inputs, +Array SplitCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto param = attrs.as(); @@ -2184,11 +2184,11 @@ Array SplitCompute(const Attrs& attrs, if (const IntImmNode* sections = param->indices_or_sections.as()) { int64_t num_sections = sections->value; - return Array{ + return Array{ topi::split_sections(inputs[0], num_sections, param->axis) }; } else { auto indices = Downcast >(param->indices_or_sections); - return Array{ topi::split(inputs[0], indices, param->axis) }; + return Array{ topi::split(inputs[0], indices, param->axis) }; } } @@ -2304,8 +2304,8 @@ Expr MakeSliceLike(Expr data, return CallNode::make(op, {data, shape_like}, Attrs(attrs), {}); } -Array SliceLikeCompute(const Attrs& attrs, - const Array& inputs, +Array SliceLikeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); @@ -2342,7 +2342,7 @@ Array SliceLikeCompute(const Attrs& attrs, << topi::GetConstInt(src_shape[axis]); } } - return Array{ + return Array{ topi::strided_slice(inputs[0], GetIntArray(begin_idx), GetIntArray(end_idx), @@ -2370,13 +2370,13 @@ RELAY_REGISTER_OP("slice_like") // relay.layout_transform TVM_REGISTER_NODE_TYPE(LayoutTransformAttrs); -Array LayoutTransformCompute(const Attrs& attrs, - const Array& inputs, +Array LayoutTransformCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{ + return Array{ topi::layout_transform(inputs[0], param->src_layout, param->dst_layout) }; } @@ -2503,8 +2503,8 @@ bool GatherNDRel(const Array& types, return true; } -Array GatherNDCompute(const Attrs& attrs, - const Array& inputs, +Array GatherNDCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { return { topi::gather_nd(inputs[0], inputs[1]) }; @@ -2557,13 +2557,13 @@ bool SequenceMaskRel(const Array& types, return true; } -Array SequenceMaskCompute(const Attrs& attrs, - const Array& inputs, +Array SequenceMaskCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{ topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) }; + return Array{ topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) }; } Expr MakeSequenceMask(Expr data, @@ -2669,13 +2669,13 @@ bool OneHotRel(const Array& types, return true; } -Array OneHotCompute(const Attrs& attrs, - const Array& inputs, +Array OneHotCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array { + return Array { topi::one_hot(inputs[0], inputs[1](), inputs[2](), diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc index 5189f7d97ec3..331653b49445 100644 --- a/src/relay/op/tensor/unary.cc +++ b/src/relay/op/tensor/unary.cc @@ -34,9 +34,9 @@ namespace relay { #define RELAY_UNARY_COMPUTE(FTOPI) \ [] (const Attrs& attrs, \ - const Array& inputs, \ + const Array& inputs, \ const Type& out_type, \ - const Target& target) -> Array { \ + const Target& target) -> Array { \ return {FTOPI(inputs[0])}; \ } \ @@ 
-290,8 +290,8 @@ bool ShapeOfRel(const Array& types, return true; } -Array ShapeOfCompute(const Attrs& attrs, - const Array& inputs, +Array ShapeOfCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { CHECK_EQ(inputs.size(), 1); @@ -341,14 +341,14 @@ bool NdarraySizeRel(const Array& types, return true; } -Array NdarraySizeCompute(const Attrs& attrs, - const Array& inputs, +Array NdarraySizeCompute(const Attrs& attrs, + const Array& inputs, const Type& out_type, const Target& target) { CHECK_EQ(inputs.size(), 1); const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{topi::ndarray_size(inputs[0], param->dtype)}; + return Array{topi::ndarray_size(inputs[0], param->dtype)}; } TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size") diff --git a/src/relay/op/vision/yolo.cc b/src/relay/op/vision/yolo.cc index 616dc2a5510b..9964a8274392 100644 --- a/src/relay/op/vision/yolo.cc +++ b/src/relay/op/vision/yolo.cc @@ -82,12 +82,12 @@ Its function is mostly shape transform.")doc" TVM_ADD_FILELINE) .set_attrs_type() .add_type_rel("YoloReorg", YoloReorgRel) .set_attr("FTVMCompute", [](const Attrs& attrs, - const Array& inputs, + const Array& inputs, const Type& out_type, const Target& target) { const auto* params = attrs.as(); CHECK(params != nullptr); - return Array{ topi::vision::reorg(inputs[0], params->stride) }; + return Array{ topi::vision::reorg(inputs[0], params->stride) }; }); } // namespace relay diff --git a/src/relay/pass/alter_op_layout.cc b/src/relay/pass/alter_op_layout.cc index b027e5edb571..efc7eb57e082 100644 --- a/src/relay/pass/alter_op_layout.cc +++ b/src/relay/pass/alter_op_layout.cc @@ -78,10 +78,10 @@ class AlterTransformMemorizer : public TransformMemorizer { Expr new_e; bool modified = false; if (falter_layout.count(op)) { - tvm::Array tinfos; + tvm::Array tinfos; for (auto expr : ref_call->args) { auto ttype = expr->type_as(); - tinfos.push_back(tvm::placeholder(ttype->shape, ttype->dtype)); + tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype)); } Expr altered_value = falter_layout[op](ref_call->attrs, new_args, tinfos); if (altered_value.defined()) { diff --git a/src/relay/pass/convert_layout.cc b/src/relay/pass/convert_layout.cc index 20007d289327..18a56a4a62e4 100644 --- a/src/relay/pass/convert_layout.cc +++ b/src/relay/pass/convert_layout.cc @@ -86,10 +86,10 @@ class ConvertTransformMemorizer : public TransformMemorizer { Expr new_e; bool modified = false; if (fconvert_layout.count(op)) { - tvm::Array tinfos; + tvm::Array tinfos; for (auto expr : ref_call->args) { auto ttype = expr->type_as(); - tinfos.push_back(tvm::placeholder(ttype->shape, ttype->dtype)); + tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype)); } Expr altered_value = fconvert_layout[op](ref_call->attrs, new_args, tinfos, operator->()->desired_layout_); diff --git a/src/schedule/auto_inline_elem_wise.cc b/src/schedule/auto_inline_elem_wise.cc index 3e3292303e9d..0429ff6966fc 100644 --- a/src/schedule/auto_inline_elem_wise.cc +++ b/src/schedule/auto_inline_elem_wise.cc @@ -25,7 +25,7 @@ #include namespace tvm { -namespace schedule { +namespace top { using namespace ir; @@ -111,5 +111,5 @@ void AutoInlineInjective(Schedule sch) { } } -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/src/schedule/bound.cc b/src/schedule/bound.cc index 5f363dbc126e..3cc4f92e2434 100644 --- a/src/schedule/bound.cc +++ b/src/schedule/bound.cc @@ -31,7 +31,7 @@ #include "../runtime/thread_storage_scope.h" 
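In caller code the rename is mostly mechanical, as the alter_op_layout.cc and convert_layout.cc hunks above show: names that used to resolve in tvm:: or tvm::schedule:: now resolve in tvm::top::. A minimal sketch, assuming the elided Array element type is top::Tensor (ttype is the TensorTypeNode local from the surrounding hunks, and sch is a tvm::top::Schedule):

tvm::Array<tvm::top::Tensor> tinfos;
// was tvm::placeholder(ttype->shape, ttype->dtype)
tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype));
// was tvm::schedule::AutoInlineInjective(sch)
tvm::top::AutoInlineInjective(sch);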
namespace tvm { -namespace schedule { +namespace top { using runtime::StorageRank; using runtime::StorageScope; @@ -259,5 +259,5 @@ Map InferBound(const Schedule& sch) { return Map(ret.begin(), ret.end()); } -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/src/schedule/graph.cc b/src/schedule/graph.cc index 3cf30f4ea7e2..bc85f0e1eaa0 100644 --- a/src/schedule/graph.cc +++ b/src/schedule/graph.cc @@ -30,7 +30,7 @@ #include "graph.h" namespace tvm { -namespace schedule { +namespace top { // key to specific tensor dimension. struct TensorDimKey { ir::FunctionRef f; @@ -55,13 +55,13 @@ struct TensorDimKey { return !operator==(other); } }; -} // namespace schedule +} // namespace top } // namespace tvm namespace std { template <> -struct hash<::tvm::schedule::TensorDimKey> { - std::size_t operator()(const ::tvm::schedule::TensorDimKey& k) const { +struct hash<::tvm::top::TensorDimKey> { + std::size_t operator()(const ::tvm::top::TensorDimKey& k) const { size_t lhs = ::tvm::ObjectHash()(k.f); size_t rhs = static_cast(k.value_index) << 16UL | static_cast(k.dim); @@ -73,7 +73,7 @@ struct hash<::tvm::schedule::TensorDimKey> { namespace tvm { -namespace schedule { +namespace top { // construct a read graph that gives readers of each operation // that the root depend on @@ -429,5 +429,5 @@ Map ScanFixPointAnalysis(const Operation& scan_op) { return ret; } -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/src/schedule/graph.h b/src/schedule/graph.h index 99ba6e38580a..f8dc344ef8e8 100644 --- a/src/schedule/graph.h +++ b/src/schedule/graph.h @@ -32,7 +32,7 @@ #include namespace tvm { -namespace schedule { +namespace top { /*! * \brief data structure of Operation->Tensors it reads @@ -125,7 +125,7 @@ Array ScanGetBody(const Operation& scan_op); */ Map ScanFixPointAnalysis(const Operation& scan); -} // namespace schedule +} // namespace top } // namespace tvm #endif // TVM_SCHEDULE_GRAPH_H_ diff --git a/src/schedule/message_passing.cc b/src/schedule/message_passing.cc index 816ea440b566..ce1a4ca624d7 100644 --- a/src/schedule/message_passing.cc +++ b/src/schedule/message_passing.cc @@ -28,7 +28,7 @@ #include "../arith/compute_expr.h" namespace tvm { -namespace schedule { +namespace top { using namespace ir; @@ -539,5 +539,5 @@ std::vector MakeBoundCheck( } return preds; } -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/src/schedule/message_passing.h b/src/schedule/message_passing.h index 4b81bccf4b2f..a0fc175cfe88 100644 --- a/src/schedule/message_passing.h +++ b/src/schedule/message_passing.h @@ -34,7 +34,7 @@ #include namespace tvm { -namespace schedule { +namespace top { /*! * \brief Downward inference of domain of each IterVar. 
* Caller set the range of the root, then the function @@ -128,6 +128,6 @@ MakeBoundCheck( bool skip_ivar_domain, const std::unordered_set& skip_iter); -} // namespace schedule +} // namespace top } // namespace tvm #endif // TVM_SCHEDULE_MESSAGE_PASSING_H_ diff --git a/src/schedule/schedule_dataflow_rewrite.cc b/src/schedule/schedule_dataflow_rewrite.cc index f06cf96c551b..1daeee831f0b 100644 --- a/src/schedule/schedule_dataflow_rewrite.cc +++ b/src/schedule/schedule_dataflow_rewrite.cc @@ -30,7 +30,7 @@ #include "../arith/compute_expr.h" namespace tvm { - +namespace top { // find first occurance location in leaf template size_t FindNodeRef(ArrayNode* array_node, const T& v) { @@ -211,7 +211,7 @@ void PrepareAxisMapping(Stage orig_stage, dom_map[iv] = iv->dom; analyzer.Bind(iv->var, iv->dom); } - schedule::PassDownDomain(orig_stage, &dom_map, &analyzer, true); + top::PassDownDomain(orig_stage, &dom_map, &analyzer, true); { // The source->cache std::unordered_map value_map; @@ -235,8 +235,8 @@ void PrepareAxisMapping(Stage orig_stage, for (IterVar iv : op->reduce_axis) { skip_bound_check.insert(iv); } - schedule::PassUpIndex(orig_stage, dom_map, &value_map, true); - predicates = schedule::MakeBoundCheck( + PassUpIndex(orig_stage, dom_map, &value_map, true); + predicates = MakeBoundCheck( orig_stage, dom_map, value_map, true, skip_bound_check); // The root axis for (IterVar iv : op->axis) { @@ -347,7 +347,7 @@ Array CacheWriteWithReLayout(Schedule sch, for (IterVar iv : compute->axis) { value_map[iv] = iv->var; } - schedule::PassDownIndex(orig_stage, dom_map, &value_map, true); + top::PassDownIndex(orig_stage, dom_map, &value_map, true); for (IterVar iv : orig_stage->leaf_iter_vars) { if (red_axis.count(iv)) continue; args.push_back(value_map.at(iv)); @@ -437,7 +437,7 @@ Array CacheWriteWithReLayoutTensor(Schedule sch, for (IterVar iv : compute_axis) { value_map[iv] = iv->var; } - schedule::PassDownIndex(orig_stage, dom_map, &value_map, true); + PassDownIndex(orig_stage, dom_map, &value_map, true); for (IterVar iv : orig_stage->leaf_iter_vars) { if (red_axis.count(iv)) continue; args.push_back(value_map.at(iv)); @@ -692,8 +692,8 @@ Array Schedule::rfactor(const Tensor& tensor, // Find touched reduction axis. std::unordered_map touch_map; touch_map[axis] = 1; - schedule::PassUpBitMaskOr(reduce_stage, &touch_map, true); - schedule::PassDownBitMaskOr(reduce_stage, &touch_map, true); + top::PassUpBitMaskOr(reduce_stage, &touch_map, true); + top::PassDownBitMaskOr(reduce_stage, &touch_map, true); // skip reduction iteration. std::unordered_set skip_bound_check; // Verify normal axis are not touched. @@ -715,7 +715,7 @@ Array Schedule::rfactor(const Tensor& tensor, } analyzer.Bind(iv->var, iv->dom); } - schedule::PassDownDomain(reduce_stage, &dom_map, &analyzer, true); + top::PassDownDomain(reduce_stage, &dom_map, &analyzer, true); for (IterVar iv : reduce_stage->leaf_iter_vars) { if (touch_map.count(iv)) { Range dom = dom_map.at(iv); @@ -726,8 +726,8 @@ Array Schedule::rfactor(const Tensor& tensor, } } } - schedule::PassUpIndex(reduce_stage, dom_map, &value_map, true); - std::vector predicates = schedule::MakeBoundCheck( + top::PassUpIndex(reduce_stage, dom_map, &value_map, true); + std::vector predicates = MakeBoundCheck( reduce_stage, dom_map, value_map, true, skip_bound_check); // Get the factored op node. 
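Because schedule_dataflow_rewrite.cc itself now sits inside namespace tvm::top, the message-passing helpers are reached with a bare or top:: qualification instead of schedule::. A sketch of the rewritten rfactor call site, assuming the stripped template argument on the predicate vector is PrimExpr:

top::PassUpIndex(reduce_stage, dom_map, &value_map, true);
std::vector<PrimExpr> predicates =
    MakeBoundCheck(reduce_stage, dom_map, value_map, true, skip_bound_check);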
@@ -881,5 +881,5 @@ Array Schedule::rfactor(const Tensor& tensor, reduce_stage->relations = Array(); return factor_tensors; } - +} // namespace top } // namespace tvm diff --git a/src/schedule/schedule_lang.cc b/src/schedule/schedule_lang.cc index fe56b662dfcd..4b8b25cc0a70 100644 --- a/src/schedule/schedule_lang.cc +++ b/src/schedule/schedule_lang.cc @@ -26,8 +26,7 @@ #include "graph.h" namespace tvm { - -namespace { +namespace top { // find first occurance location in leaf template @@ -84,8 +83,6 @@ void Split(StageNode* self, leaf_vars->data.insert(leaf_vars->data.begin() + pos, outer); } -} // namespace - Stage::Stage(Operation op) { auto n = make_object(); n->op = op; @@ -594,7 +591,7 @@ Stage Schedule::create_group(const Array& outputs, self->InitCache(); const auto& op2stage_cache = self->op2stage_cache_; // Get the ops. - Array ops = schedule::GetSubGraph( + Array ops = top::GetSubGraph( RemapTensor(self, outputs), RemapTensor(self, inputs), include_inputs); @@ -718,8 +715,8 @@ Schedule ScheduleNode::make(Array ops) { auto n = make_object(); Schedule sch(n); n->outputs = ops; - auto g = schedule::CreateReadGraph(n->outputs); - Array post_order = schedule::PostDFSOrder(n->outputs, g); + auto g = top::CreateReadGraph(n->outputs); + Array post_order = top::PostDFSOrder(n->outputs, g); // output set. std::unordered_set output_set; for (Operation x : ops) { @@ -851,4 +848,5 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) auto* op = static_cast(node.get()); p->stream << "schedule(" << op << ")"; }); +} // namespace top } // namespace tvm diff --git a/src/schedule/schedule_ops.cc b/src/schedule/schedule_ops.cc index 1eb595c852c0..a3beeb548971 100644 --- a/src/schedule/schedule_ops.cc +++ b/src/schedule/schedule_ops.cc @@ -33,7 +33,7 @@ #include "../pass/ir_util.h" namespace tvm { -namespace schedule { +namespace top { using namespace ir; @@ -423,5 +423,5 @@ Stmt ScheduleOps( return post_proc(std::move(body)); } -} // namespace schedule +} // namespace top } // namespace tvm diff --git a/tests/cpp/build_module_test.cc b/tests/cpp/build_module_test.cc index 15bdd474f803..916f7ef037fe 100644 --- a/tests/cpp/build_module_test.cc +++ b/tests/cpp/build_module_test.cc @@ -30,6 +30,7 @@ TEST(BuildModule, Basic) { using namespace tvm; + using namespace tvm::top; auto n = var("n"); Array shape; shape.push_back(n); @@ -75,6 +76,7 @@ TEST(BuildModule, Heterogeneous) { */ using namespace tvm; + using namespace tvm::top; const runtime::PackedFunc* pf = runtime::Registry::Get("module._Enabled"); bool enabled = (*pf)("cuda"); if (!enabled) { diff --git a/tests/cpp/simple_passes_test.cc b/tests/cpp/simple_passes_test.cc index 6333f1549bec..aabb6c31e0db 100644 --- a/tests/cpp/simple_passes_test.cc +++ b/tests/cpp/simple_passes_test.cc @@ -28,7 +28,7 @@ TEST(SimplePasses, HasSideEffect) { Array shape; shape.push_back(n); - auto A = placeholder(shape, DataType::Float(32), "A"); + auto A = top::placeholder(shape, DataType::Float(32), "A"); CHECK(!tvm::ir::HasSideEffect(A[0])); } diff --git a/tests/cpp/tensor_test.cc b/tests/cpp/tensor_test.cc index c994c0fbc355..f8973211231c 100644 --- a/tests/cpp/tensor_test.cc +++ b/tests/cpp/tensor_test.cc @@ -23,6 +23,8 @@ TEST(Tensor, Basic) { using namespace tvm; + using namespace tvm::top; + Var m("m"), n("n"), l("l"); Tensor A = placeholder({m, l}, DataType::Float(32), "A"); @@ -37,15 +39,17 @@ TEST(Tensor, Basic) { TEST(Tensor, Reduce) { using namespace tvm; + using namespace tvm::top; + Var m("m"), n("n"), l("l"); - Tensor A = placeholder({m, l}, DataType::Float(32), 
"A"); - Tensor B = placeholder({n, l}, DataType::Float(32), "B"); + top::Tensor A = top::placeholder({m, l}, DataType::Float(32), "A"); + top::Tensor B = top::placeholder({n, l}, DataType::Float(32), "B"); IterVar rv = reduce_axis(Range{0, l}, "k"); - auto C = compute({m, n}, [&](Var i, Var j) { + auto C = top::compute({m, n}, [&](Var i, Var j) { return sum(max(1 + A[i][rv] + 1, B[j][rv]), {rv}); }, "C"); - LOG(INFO) << C->op.as()->body; + LOG(INFO) << C->op.as()->body; } int main(int argc, char ** argv) { diff --git a/topi/include/topi/broadcast.h b/topi/include/topi/broadcast.h index ce16e23bf4fa..47dfc3e77f09 100644 --- a/topi/include/topi/broadcast.h +++ b/topi/include/topi/broadcast.h @@ -43,7 +43,7 @@ namespace topi { * * \return A Tensor whose op member is a broadcast operation */ -inline tvm::Tensor broadcast_to(const tvm::Tensor& t, +inline tvm::top::Tensor broadcast_to(const tvm::top::Tensor& t, const tvm::Array& output_shape, std::string name = "T_broadcast_to", std::string tag = kBroadcast) { @@ -58,7 +58,7 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t, auto l = [&](tvm::Array ovars) { return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars)); }; - return tvm::compute( + return tvm::top::compute( tvm::Array(bh.common_shape.begin(), bh.common_shape.end()), l, name, @@ -70,44 +70,44 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t, const tvm::PrimExpr& b) { \ ComputeRule; \ } \ - inline tvm::Tensor Name(const tvm::Tensor& A, \ - const tvm::Tensor& B, \ - std::string name = "T_" #Name, \ - std::string tag = kBroadcast) { \ + inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \ + const tvm::top::Tensor& B, \ + std::string name = "T_" #Name, \ + std::string tag = kBroadcast) { \ auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ return detail::WithBroadcast(l, A, B, name, tag); \ } \ - inline tvm::Tensor Name(const tvm::Tensor& A, \ + inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \ const tvm::PrimExpr& B, \ std::string name = "T_" #Name, \ std::string tag = kElementWise) { \ - auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ - return compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \ + auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ + return tvm::top::compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \ return l(A(i), B); \ }, name, tag); \ } \ - inline tvm::Tensor Name(const tvm::PrimExpr& A, \ - const tvm::Tensor& B, \ - std::string name = "T_" #Name, \ - std::string tag = kElementWise) { \ + inline tvm::top::Tensor Name(const tvm::PrimExpr& A, \ + const tvm::top::Tensor& B, \ + std::string name = "T_" #Name, \ + std::string tag = kElementWise) { \ auto l = [&](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ - return compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \ + return tvm::top::compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \ return l(A, B(i)); \ }, name, tag); \ } #define TOPI_DEFINE_OP_OVERLOAD(Name, OpName) \ - inline tvm::Tensor Name(const tvm::Tensor& A, \ - const tvm::Tensor& B) { \ + inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \ + const tvm::top::Tensor& B) { \ return topi::OpName(A, B); \ } \ - inline tvm::Tensor Name(const tvm::PrimExpr& A, \ - const tvm::Tensor& B) { \ + inline tvm::top::Tensor Name(const tvm::PrimExpr& A, \ + const tvm::top::Tensor& B) { \ return topi::OpName(A, B); \ } \ - inline tvm::Tensor Name(const tvm::Tensor& A, \ - const tvm::PrimExpr& B) { \ + inline tvm::top::Tensor Name(const 
tvm::top::Tensor& A, \ + const tvm::PrimExpr& B) { \ return topi::OpName(A, B); \ } diff --git a/topi/include/topi/contrib/cublas.h b/topi/include/topi/contrib/cublas.h index c64490330f42..3766ee300132 100644 --- a/topi/include/topi/contrib/cublas.h +++ b/topi/include/topi/contrib/cublas.h @@ -30,6 +30,7 @@ namespace topi { namespace contrib { using namespace tvm; +using namespace tvm::top; using namespace topi::detail; /*! * \brief Create an op that multiplies lhs and rhs with cuBLAS diff --git a/topi/include/topi/contrib/rocblas.h b/topi/include/topi/contrib/rocblas.h index a61499dc488f..e061e44f8f90 100644 --- a/topi/include/topi/contrib/rocblas.h +++ b/topi/include/topi/contrib/rocblas.h @@ -30,6 +30,7 @@ namespace topi { namespace contrib { using namespace tvm; +using namespace tvm::top; /*! * \brief Create an op that multiplies lhs and rhs with rocBLAS * diff --git a/topi/include/topi/cuda/dense.h b/topi/include/topi/cuda/dense.h index 781258afa982..5a55c38d1127 100644 --- a/topi/include/topi/cuda/dense.h +++ b/topi/include/topi/cuda/dense.h @@ -34,6 +34,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { /*! @@ -47,10 +48,10 @@ namespace cuda { * * \return Tensor with shape [batch, out_dim] */ -inline tvm::Tensor dense_cuda(const Target& target, - const tvm::Tensor& data, - const tvm::Tensor& weight, - const tvm::Tensor& bias, +inline tvm::top::Tensor dense_cuda(const Target& target, + const tvm::top::Tensor& data, + const tvm::top::Tensor& weight, + const tvm::top::Tensor& bias, const DataType& out_dtype) { CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; @@ -66,7 +67,7 @@ inline tvm::Tensor dense_cuda(const Target& target, CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::cublas_matmul(data, weight, false, true); if (bias.defined()) { - mm = tvm::compute({ batch, out_dim }, + mm = tvm::top::compute({ batch, out_dim }, [&](Var i, Var j) { return mm(i, j) + bias(j); }, "tensor", kBroadcast); diff --git a/topi/include/topi/cuda/injective.h b/topi/include/topi/cuda/injective.h index 663bc1fcbeb2..1bcdf91e7047 100644 --- a/topi/include/topi/cuda/injective.h +++ b/topi/include/topi/cuda/injective.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { @@ -39,7 +40,7 @@ namespace cuda { * * \param sch The schedule to update. * \param out The tensor representing the injective op. - * + * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { @@ -67,7 +68,7 @@ inline Schedule schedule_injective(const Target &target, const Array& ou out_ops.push_back(t->op); } auto s = create_schedule(out_ops); - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); for (auto out : outs) { schedule_injective_from_existing(s, out); } diff --git a/topi/include/topi/cuda/normalization.h b/topi/include/topi/cuda/normalization.h index 302578644555..f6d7c255b707 100644 --- a/topi/include/topi/cuda/normalization.h +++ b/topi/include/topi/cuda/normalization.h @@ -30,6 +30,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { /*! 
* \brief Create a CUDA schedule for LRN diff --git a/topi/include/topi/cuda/pooling.h b/topi/include/topi/cuda/pooling.h index df4ab3362634..ec06371b4cd9 100644 --- a/topi/include/topi/cuda/pooling.h +++ b/topi/include/topi/cuda/pooling.h @@ -32,6 +32,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { diff --git a/topi/include/topi/cuda/reduction.h b/topi/include/topi/cuda/reduction.h index a82b36306617..86f0fe86fe54 100644 --- a/topi/include/topi/cuda/reduction.h +++ b/topi/include/topi/cuda/reduction.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { /*! diff --git a/topi/include/topi/cuda/softmax.h b/topi/include/topi/cuda/softmax.h index 33be89922fea..b2ba0bcbafd4 100644 --- a/topi/include/topi/cuda/softmax.h +++ b/topi/include/topi/cuda/softmax.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace cuda { @@ -50,9 +51,9 @@ inline Schedule schedule_softmax(const Target &target, const Array& outs auto s = create_schedule(out_ops); auto softmax = outs[0]; - tvm::Tensor max_elem; - tvm::Tensor expsum; - tvm::Tensor exp; + tvm::top::Tensor max_elem; + tvm::top::Tensor expsum; + tvm::top::Tensor exp; bool has_exp = false; auto tag = softmax->op.as()->tag; diff --git a/topi/include/topi/detail/array_utils.h b/topi/include/topi/detail/array_utils.h index 93e7c3e156fb..07cce9af693e 100644 --- a/topi/include/topi/detail/array_utils.h +++ b/topi/include/topi/detail/array_utils.h @@ -29,6 +29,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Search an array for a specific item diff --git a/topi/include/topi/detail/broadcast.h b/topi/include/topi/detail/broadcast.h index 2d326e7e1e07..dc585439ce89 100644 --- a/topi/include/topi/detail/broadcast.h +++ b/topi/include/topi/detail/broadcast.h @@ -100,7 +100,7 @@ inline BroadcastHelper BroadcastShape(const tvm::Array& shape1, inline tvm::Array InputIndexFromBroadcast( const tvm::Array& ovars, - const tvm::Tensor& T, + const tvm::top::Tensor& T, const std::deque& my_vars, const std::deque& all_vars) { tvm::Array ivars; @@ -127,9 +127,9 @@ inline tvm::Array InputIndexFromBroadcast( } template -inline tvm::Tensor WithBroadcast(FBinaryExpr op, - const tvm::Tensor& A, - const tvm::Tensor& B, +inline tvm::top::Tensor WithBroadcast(FBinaryExpr op, + const tvm::top::Tensor& A, + const tvm::top::Tensor& B, const std::string& name = "tensor", const std::string& tag = "") { auto bh = BroadcastShape(A->shape, B->shape); @@ -137,7 +137,7 @@ inline tvm::Tensor WithBroadcast(FBinaryExpr op, return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)), B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars))); }; - return tvm::compute( + return tvm::top::compute( tvm::Array(bh.common_shape.begin(), bh.common_shape.end()), l, name, diff --git a/topi/include/topi/detail/constant_utils.h b/topi/include/topi/detail/constant_utils.h index e6de76f20881..210049344404 100644 --- a/topi/include/topi/detail/constant_utils.h +++ b/topi/include/topi/detail/constant_utils.h @@ -33,6 +33,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! 
* \brief Test whether the given Expr is a constant integer diff --git a/topi/include/topi/detail/extern.h b/topi/include/topi/detail/extern.h index 8bdda802ad7a..532b704d6fac 100644 --- a/topi/include/topi/detail/extern.h +++ b/topi/include/topi/detail/extern.h @@ -32,6 +32,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Construct a buffer to pass to an external function diff --git a/topi/include/topi/detail/fuse.h b/topi/include/topi/detail/fuse.h index d573a3fcb18a..91f40ad0ae7b 100644 --- a/topi/include/topi/detail/fuse.h +++ b/topi/include/topi/detail/fuse.h @@ -29,6 +29,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Fuse all of the given args diff --git a/topi/include/topi/detail/pad_utils.h b/topi/include/topi/detail/pad_utils.h index ec757e9954fe..12b15413e72a 100644 --- a/topi/include/topi/detail/pad_utils.h +++ b/topi/include/topi/detail/pad_utils.h @@ -32,6 +32,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Get padding size for each side given padding height and width diff --git a/topi/include/topi/detail/ravel_unravel.h b/topi/include/topi/detail/ravel_unravel.h index 5526a7dac7c2..40ebf11e4e5c 100644 --- a/topi/include/topi/detail/ravel_unravel.h +++ b/topi/include/topi/detail/ravel_unravel.h @@ -32,6 +32,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Flatten the indices to 1D diff --git a/topi/include/topi/detail/tensor_utils.h b/topi/include/topi/detail/tensor_utils.h index e52452e89d33..e306880284db 100644 --- a/topi/include/topi/detail/tensor_utils.h +++ b/topi/include/topi/detail/tensor_utils.h @@ -28,6 +28,7 @@ namespace topi { namespace detail { using namespace tvm; +using namespace tvm::top; /*! * \brief Check whether input shape has dimension of size 0; diff --git a/topi/include/topi/elemwise.h b/topi/include/topi/elemwise.h index df7cff064383..46515e763226 100644 --- a/topi/include/topi/elemwise.h +++ b/topi/include/topi/elemwise.h @@ -33,6 +33,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; // Unary intrinsic operators #define TOPI_DECLARE_UNARY_OP(OpName) \ diff --git a/topi/include/topi/generic/default.h b/topi/include/topi/generic/default.h index 9dff8d53d6af..931631ea7a74 100644 --- a/topi/include/topi/generic/default.h +++ b/topi/include/topi/generic/default.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace generic { /*! @@ -66,7 +67,7 @@ inline Schedule default_schedule_auto_inline(const Target& target, Array } auto s = create_schedule(out_ops); auto x = outs[0]; - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); auto axis = s[x]->op.as()->axis; if (axis.size() > 0) { detail::Fuse(s[x], axis); diff --git a/topi/include/topi/generic/extern.h b/topi/include/topi/generic/extern.h index 03e362c399df..0d685c63d5b5 100644 --- a/topi/include/topi/generic/extern.h +++ b/topi/include/topi/generic/extern.h @@ -32,6 +32,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace generic { /*! 
@@ -49,7 +50,7 @@ inline Schedule schedule_extern(const Target& target, Array outs) { } auto s = create_schedule(out_ops); - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); for (auto out : outs) { if (out->op->IsInstance()) { continue; diff --git a/topi/include/topi/generic/injective.h b/topi/include/topi/generic/injective.h index fa7df4ccfd46..0913dda13c36 100644 --- a/topi/include/topi/generic/injective.h +++ b/topi/include/topi/generic/injective.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace generic { @@ -39,7 +40,7 @@ namespace generic { * * \param sch The schedule to update. * \param out The tensor representing the injective op. - * + * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { @@ -61,7 +62,7 @@ inline Schedule schedule_injective(const Target &target, const Array& ou out_ops.push_back(t->op); } auto s = create_schedule(out_ops); - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); auto x = outs[0]; schedule_injective_from_existing(s, x); diff --git a/topi/include/topi/image/resize.h b/topi/include/topi/image/resize.h index f169ec946636..c0075e06d168 100644 --- a/topi/include/topi/image/resize.h +++ b/topi/include/topi/image/resize.h @@ -39,6 +39,7 @@ namespace topi { namespace image { using namespace tvm; +using namespace tvm::top; /*! * \brief Sample a point in a tensor using bilinear interpolation. diff --git a/topi/include/topi/nn.h b/topi/include/topi/nn.h index 3f65c75a02bb..0c3a4ceaabde 100644 --- a/topi/include/topi/nn.h +++ b/topi/include/topi/nn.h @@ -36,6 +36,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace detail { template @@ -61,11 +62,11 @@ tvm::PrimExpr Map(const tvm::Array& exprs, T op) { * \return A Tensor whose op member is the relu operation */ template -inline tvm::Tensor relu(const tvm::Tensor& t, +inline tvm::top::Tensor relu(const tvm::top::Tensor& t, T threshold = static_cast(0), std::string name = "T_relu", std::string tag = kElementWise) { - return tvm::compute( + return tvm::top::compute( t->shape, [&](const tvm::Array& i) { auto threshold_const = tvm::make_const(t->dtype, threshold); @@ -85,11 +86,11 @@ inline tvm::Tensor relu(const tvm::Tensor& t, * * \return A Tensor whose op member is the leaky relu operation */ -inline tvm::Tensor leaky_relu(const tvm::Tensor& t, +inline tvm::top::Tensor leaky_relu(const tvm::top::Tensor& t, double alpha = 0.1, std::string name = "T_leaky_relu", std::string tag = kElementWise) { - return tvm::compute( + return tvm::top::compute( t->shape, [&](const tvm::Array& i) { auto value = t(i); @@ -111,8 +112,8 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t, * * \return A Tensor whose op member is the parametric relu operation */ -inline tvm::Tensor prelu(const tvm::Tensor &x, - const tvm::Tensor &slope, +inline tvm::top::Tensor prelu(const tvm::top::Tensor &x, + const tvm::top::Tensor &slope, const int axis = 1, std::string name = "T_prelu", std::string tag = kBroadcast) { @@ -122,7 +123,7 @@ inline tvm::Tensor prelu(const tvm::Tensor &x, topi::detail::GetConstInt(x->shape[axis])) << "Wrong slope shape received."; - return tvm::compute(x->shape, + return tvm::top::compute(x->shape, [&](const tvm::Array &indices) { auto xval = x(indices); return tvm::ir::SelectNode::make( @@ -171,7 +172,7 @@ inline tvm::Tensor prelu(const tvm::Tensor &x, * * */ -inline tvm::Tensor pad(const tvm::Tensor& t, +inline tvm::top::Tensor 
pad(const tvm::top::Tensor& t, const tvm::Array& pad_before, tvm::Array pad_after = tvm::Array(), PrimExpr pad_value = PrimExpr(), @@ -251,7 +252,7 @@ inline tvm::Tensor pad(const tvm::Tensor& t, } return t(indices); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } /*! @@ -274,8 +275,8 @@ inline tvm::Tensor pad(const tvm::Tensor& t, * \return A Tensor whose op member is the 2-D convolution operation (NCHW * layout) */ -inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, - const tvm::Tensor& W, +inline tvm::top::Tensor conv2d_nchw(const tvm::top::Tensor& I, + const tvm::top::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, @@ -303,7 +304,7 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, T(b, i, stride_h * h + kh, stride_w * w + kw) * W(o, i, kh, kw), {i, kh, kw}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } /*! @@ -325,8 +326,8 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I, * \return A Tensor whose op member is the 2-D convolution operation * (HWCN layout) */ -inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, - const tvm::Tensor& W, +inline tvm::top::Tensor conv2d_hwcn(const tvm::top::Tensor& I, + const tvm::top::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, @@ -352,7 +353,7 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o), {i, kh, kw}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } @@ -376,8 +377,8 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I, * \return A Tensor whose op member is the 2-D depthwise convolution operation * (NCHW layout) */ -inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I, - const tvm::Tensor& W, +inline tvm::top::Tensor depthwise_conv2d_nchw(const tvm::top::Tensor& I, + const tvm::top::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, @@ -406,11 +407,11 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I, W(indexdiv(i, pCM), indexmod(o, pCM), kh, kw), {i, kh, kw}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } -inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I, - const tvm::Tensor& W, +inline tvm::top::Tensor depthwise_conv2d_nhwc(const tvm::top::Tensor& I, + const tvm::top::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, @@ -439,7 +440,7 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I, W(kh, kw, indexdiv(i, pCM), indexmod(o, pCM)), {kh, kw, i}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } /*! 
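The nn.h helpers above (relu, pad, conv2d_nchw and the depthwise variants) only change their spelled-out types; call sites that already rely on "using namespace tvm::top" are unaffected. A short sketch, not part of this patch, of composing them with fully qualified types; tvm::top::placeholder is assumed to keep its (shape, dtype, name) signature, which is not shown in this diff.

    // Sketch only: NCHW convolution followed by relu through the renamed types.
    #include <topi/nn.h>

    inline tvm::top::Tensor ConvReluSketch() {
      using tvm::top::Tensor;
      Tensor data   = tvm::top::placeholder({1, 64, 56, 56}, tvm::DataType::Float(32), "data");
      Tensor kernel = tvm::top::placeholder({64, 64, 3, 3}, tvm::DataType::Float(32), "kernel");
      // pad_h = pad_w = 1 keeps the 56x56 spatial extent for a 3x3 kernel.
      Tensor conv = topi::conv2d_nchw(data, kernel, 1, 1, 1, 1);
      return topi::relu<float>(conv);
    }
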
@@ -462,8 +463,8 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I, * \return A Tensor whose op member is the 2-D groupconvolution operation * (NCHW layout) */ -inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I, - const tvm::Tensor& W, +inline tvm::top::Tensor group_conv2d_ngchw(const tvm::top::Tensor& I, + const tvm::top::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, @@ -498,7 +499,7 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I, I(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw), {i, kh, kw}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } } // namespace topi diff --git a/topi/include/topi/nn/batch_matmul.h b/topi/include/topi/nn/batch_matmul.h index 60b4175d65f8..d31b831fcbbc 100644 --- a/topi/include/topi/nn/batch_matmul.h +++ b/topi/include/topi/nn/batch_matmul.h @@ -32,6 +32,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Creates an operation that calculates matrix multiplication in batch. @@ -41,8 +42,8 @@ using namespace tvm; * * \return Tensor with shape [batch, M, N] */ -inline tvm::Tensor batch_matmul(const tvm::Tensor& x, - const tvm::Tensor& y) { +inline tvm::top::Tensor batch_matmul(const tvm::top::Tensor& x, + const tvm::top::Tensor& y) { CHECK_EQ(x->shape.size(), 3) << "batch_matmul requires 3-D data"; CHECK_EQ(y->shape.size(), 3) << "batch_matmul requires 3-D data"; @@ -52,7 +53,7 @@ inline tvm::Tensor batch_matmul(const tvm::Tensor& x, auto N = y->shape[1]; auto k = tvm::reduce_axis(Range(0, K), "k"); - auto result = tvm::compute( + auto result = tvm::top::compute( { batch, M, N }, [&](Var b, Var i, Var j) { return tvm::sum(x(b, i, k) * y(b, j, k), { k }); diff --git a/topi/include/topi/nn/bias_add.h b/topi/include/topi/nn/bias_add.h index 148d892229db..1eb43a089044 100644 --- a/topi/include/topi/nn/bias_add.h +++ b/topi/include/topi/nn/bias_add.h @@ -43,7 +43,9 @@ namespace nn { * \param axis The axis to add the bias to. * \return Tensor with shape [batch, in_dim] */ -inline tvm::Tensor bias_add(const tvm::Tensor& data, const tvm::Tensor& bias, int axis) { +inline tvm::top::Tensor bias_add(const tvm::top::Tensor& data, + const tvm::top::Tensor& bias, + int axis) { int data_ndim = data->shape.size(); if (axis < 0) { axis += data_ndim; diff --git a/topi/include/topi/nn/bnn.h b/topi/include/topi/nn/bnn.h index e2af3ae61518..47f6d0c223b3 100644 --- a/topi/include/topi/nn/bnn.h +++ b/topi/include/topi/nn/bnn.h @@ -34,6 +34,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Binarization and bit-packing along a certain axis. 
@@ -46,7 +47,7 @@ using namespace tvm; * * \return Output tensor with dtype uint32 */ -inline tvm::Tensor binarize_pack(const tvm::Tensor& data, +inline tvm::top::Tensor binarize_pack(const tvm::top::Tensor& data, int axis, std::string name = "PackedInput", std::string tag = "binarize_pack") { @@ -62,7 +63,7 @@ inline tvm::Tensor binarize_pack(const tvm::Tensor& data, ishape[i]); } - return tvm::compute( + return tvm::top::compute( oshape, [&](const Array& indices) { Array start_idx; @@ -98,8 +99,8 @@ inline tvm::Tensor binarize_pack(const tvm::Tensor& data, * * \return Tensor with shape [batch, out_dim], dtype is float32 */ -inline tvm::Tensor binary_dense(const tvm::Tensor& data, - const tvm::Tensor& weight) { +inline tvm::top::Tensor binary_dense(const tvm::top::Tensor& data, + const tvm::top::Tensor& weight) { CHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data"; CHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight"; CHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data"; @@ -110,13 +111,13 @@ inline tvm::Tensor binary_dense(const tvm::Tensor& data, auto out_dim = weight->shape[0]; auto k = tvm::reduce_axis(Range(0, in_dim), "k"); - auto matmul = tvm::compute( + auto matmul = tvm::top::compute( { batch, out_dim }, [&](Var i, Var j) { return tvm::sum(popcount(data(i, k) ^ weight(j, k)), { k }); }, "tensor", "binary_dense"); - return tvm::compute( + return tvm::top::compute( { batch, out_dim }, [&](Var i, Var j) { return 32 * in_dim - 2.0f * matmul(i, j); diff --git a/topi/include/topi/nn/dense.h b/topi/include/topi/nn/dense.h index f1bb07abf262..68c50f64e076 100644 --- a/topi/include/topi/nn/dense.h +++ b/topi/include/topi/nn/dense.h @@ -32,6 +32,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Creates an operation that calculates data * weight^T + bias @@ -43,9 +44,9 @@ using namespace tvm; * * \return Tensor with shape [batch, out_dim] */ -inline tvm::Tensor dense(const tvm::Tensor& data, - const tvm::Tensor& weight, - const tvm::Tensor& bias, +inline tvm::top::Tensor dense(const tvm::top::Tensor& data, + const tvm::top::Tensor& weight, + const tvm::top::Tensor& bias, const DataType& out_dtype) { CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; @@ -58,7 +59,7 @@ inline tvm::Tensor dense(const tvm::Tensor& data, auto out_dim = weight->shape[0]; auto k = tvm::reduce_axis(Range(0, in_dim), "k"); - auto matmul = tvm::compute( + auto matmul = tvm::top::compute( { batch, out_dim }, [&](Var i, Var j) { return tvm::sum(tvm::cast(out_dtype, data(i, k)) * @@ -66,7 +67,7 @@ inline tvm::Tensor dense(const tvm::Tensor& data, }, "tensor", "dense"); if (bias.defined()) { - matmul = tvm::compute( + matmul = tvm::top::compute( { batch, out_dim }, [&](Var i, Var j) { return matmul(i, j) + tvm::cast(out_dtype, bias(j)); diff --git a/topi/include/topi/nn/dilate.h b/topi/include/topi/nn/dilate.h index 334b17054c81..8dc3d1516f6a 100644 --- a/topi/include/topi/nn/dilate.h +++ b/topi/include/topi/nn/dilate.h @@ -33,6 +33,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! 
* \brief Create a new expression of the logical and of all @@ -79,7 +80,7 @@ inline Tensor dilate(const Tensor& x, (x->shape[i] - 1) * cast(DataType::Int(32), strides[i] + 1))); } - return tvm::compute( + return tvm::top::compute( out_shape, [&](const Array& indices) { Array not_zero; diff --git a/topi/include/topi/nn/flatten.h b/topi/include/topi/nn/flatten.h index 6b542f7c2afe..510c304572ac 100644 --- a/topi/include/topi/nn/flatten.h +++ b/topi/include/topi/nn/flatten.h @@ -36,6 +36,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Flattens the input tensor into a 2-D tensor by collapsing higher dimensions. @@ -64,7 +65,7 @@ inline Tensor flatten(const Tensor& x, } std::reverse(extra_shape.begin(), extra_shape.end()); - return tvm::compute( + return tvm::top::compute( oshape, [&](Var i, Var j) { PrimExpr idx = j; std::vector index; diff --git a/topi/include/topi/nn/l2_normalize.h b/topi/include/topi/nn/l2_normalize.h index 3f603594c5a9..5ad32a6c65da 100644 --- a/topi/include/topi/nn/l2_normalize.h +++ b/topi/include/topi/nn/l2_normalize.h @@ -31,6 +31,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief L2 normalization inference operator @@ -59,7 +60,7 @@ inline Tensor l2_normalize(const Tensor& data, Tensor sum_value = topi::sum(dot_value, axis, true); Tensor expand_sum = topi::broadcast_to(sum_value, input_shape); return topi::divide(data, - topi::sqrt(tvm::compute(expand_sum->shape, + topi::sqrt(tvm::top::compute(expand_sum->shape, [&](const Array& i){ return (max(expand_sum(i), eps)); }, name, tag))); diff --git a/topi/include/topi/nn/local_response_norm.h b/topi/include/topi/nn/local_response_norm.h index 0cce997c2009..c9836e265dca 100644 --- a/topi/include/topi/nn/local_response_norm.h +++ b/topi/include/topi/nn/local_response_norm.h @@ -32,6 +32,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Local response normalization inference operator @@ -67,21 +68,21 @@ inline Tensor lrn(const Tensor& data, auto rxs = tvm::reduce_axis(Range(0, size), "rxs"); Tensor sqr_sum; if (axis == 1) { - sqr_sum = tvm::compute(input_shape, + sqr_sum = tvm::top::compute(input_shape, [&](Var i, Var l, Var j, Var k) { return tvm::sum(pad_data(i, l + rxs, j, k) * pad_data(i, l + rxs, j, k), {rxs}); }); } else if (axis == 3) { - sqr_sum = tvm::compute(input_shape, + sqr_sum = tvm::top::compute(input_shape, [&](Var i, Var l, Var j, Var k) { return tvm::sum(pad_data(i, l, j, k + rxs) * pad_data(i, l, j, k + rxs), {rxs}); }); } - auto sqrt_sum_up = tvm::compute( + auto sqrt_sum_up = tvm::top::compute( input_shape, [&](Var i, Var j, Var k, Var l) { return tvm::pow(bias + diff --git a/topi/include/topi/nn/mapping.h b/topi/include/topi/nn/mapping.h index 03043ff9fe19..156696499819 100644 --- a/topi/include/topi/nn/mapping.h +++ b/topi/include/topi/nn/mapping.h @@ -32,6 +32,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! 
* \brief Scale and shift with NCHW order @@ -49,7 +50,7 @@ inline Tensor scale_shift_nchw(const Tensor& x, const Tensor& shift, std::string name = "ScaleShift", std::string tag = kBroadcast) { - return tvm::compute( + return tvm::top::compute( x->shape, [&](Var b, Var c, Var h, Var w) { return x(b, c, h, w) * scale(c) + shift(w); @@ -72,7 +73,7 @@ inline Tensor scale_shift_nhwc(const Tensor& x, const Tensor& shift, std::string name = "ScaleShift", std::string tag = kBroadcast) { - return tvm::compute( + return tvm::top::compute( x->shape, [&](Var b, Var h, Var w, Var c) { return x(b, h, w, c) * scale(c) + shift(w); diff --git a/topi/include/topi/nn/pooling.h b/topi/include/topi/nn/pooling.h index a074ee1f6ef9..ac284a0e01fe 100644 --- a/topi/include/topi/nn/pooling.h +++ b/topi/include/topi/nn/pooling.h @@ -37,6 +37,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! \brief Pooling type */ enum PoolType : int { @@ -124,7 +125,7 @@ inline Tensor pool_impl(const Tensor& x, if (pool_type == kMaxPool) { auto temp = do_pad ? pad( x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x; - return tvm::compute(out_shape, [&](const Array& output) { + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); indices.Set(height_axis, output[height_axis] * stride_height + dheight); @@ -136,7 +137,7 @@ inline Tensor pool_impl(const Tensor& x, auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x; // TVM compute for summing the pooling window. - auto pool_sum = tvm::compute(out_shape, + auto pool_sum = tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); @@ -146,7 +147,7 @@ inline Tensor pool_impl(const Tensor& x, }, "tensor", "pool_sum"); // TVM compute for dividing the reduced window sum by kernel size. 
- return tvm::compute(out_shape, + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); @@ -244,7 +245,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad, x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x; auto mp_argmax = - tvm::compute(out_shape, + tvm::top::compute(out_shape, [&](const Array& inds) { Array window_inds{inds.begin(), inds.end()}; window_inds.Set(height_axis, inds[height_axis] * stride_height + dheight); @@ -256,7 +257,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad, auto mp_inds = mp_argmax[0]; - return tvm::compute( + return tvm::top::compute( x->shape, [&](const Array& inds) { Array pad_inds {inds.begin(), inds.end()}; @@ -287,7 +288,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad, } else if (pool_type == kAvgPool) { auto windowh = tvm::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height)); auto windoww = tvm::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width)); - return tvm::compute( + return tvm::top::compute( x->shape, [&](const Array& inds) { PrimExpr pad_h_idx = inds[height_axis] + pad_top; @@ -512,7 +513,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x, out_shape.Set(width_axis, out_width); if (pool_type == kMaxPool) { - return tvm::compute(out_shape, [&](const Array& output) { + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); auto i_start_h = start_index(output[height_axis], out_height, height); @@ -526,7 +527,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x, return tvm::max(x(indices), { dheight, dwidth }); // NOLINT(*) }, "tensor", "adaptive_pool_max"); } else if (pool_type == kAvgPool) { - auto pool_sum = tvm::compute(out_shape, [&](const Array& output) { + auto pool_sum = tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); auto i_start_h = start_index(output[height_axis], out_height, height); @@ -542,7 +543,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x, return tvm::sum(x(indices), { dheight, dwidth }); }, "tensor", "adaptive_pool_sum"); - return tvm::compute(out_shape, [&](const Array& output) { + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); auto i_start_h = start_index(output[height_axis], out_height, height); @@ -696,7 +697,7 @@ inline Tensor pool_impl_nd(const Tensor& x, if (pool_type == kMaxPool) { auto temp = do_pad ? pad( x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x; - return tvm::compute(out_shape, [&](const Array& output) { + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); @@ -712,7 +713,7 @@ inline Tensor pool_impl_nd(const Tensor& x, auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x; // TVM compute for summing the pooling window. - auto pool_sum = tvm::compute(out_shape, + auto pool_sum = tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); @@ -725,7 +726,7 @@ inline Tensor pool_impl_nd(const Tensor& x, }, "tensor", "pool_sum"); // TVM compute for dividing the reduced window sum by kernel size. 
- return tvm::compute(out_shape, + return tvm::top::compute(out_shape, [&](const Array& output) { Array indices; for (const Var& var : output) indices.push_back(var); diff --git a/topi/include/topi/nn/softmax.h b/topi/include/topi/nn/softmax.h index 58ecc956964d..0ae5c106c50b 100644 --- a/topi/include/topi/nn/softmax.h +++ b/topi/include/topi/nn/softmax.h @@ -35,6 +35,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; /*! * \brief Softmax activation @@ -109,14 +110,14 @@ inline Tensor softmax(const Tensor &x, return exp(indices) / expsum(non_reduce_indices); }; - auto max_elem = tvm::compute(reduced_shape, _compute_max); - auto exp = tvm::compute(input_shape, [&](const Array &indices) { + auto max_elem = tvm::top::compute(reduced_shape, _compute_max); + auto exp = tvm::top::compute(input_shape, [&](const Array &indices) { return _compute_exp(max_elem, indices); }); - auto expsum = tvm::compute(reduced_shape, [&](const Array &indices) { + auto expsum = tvm::top::compute(reduced_shape, [&](const Array &indices) { return _compute_expsum(exp, indices); }); - return tvm::compute(input_shape, [&](const Array &indices) { + return tvm::top::compute(input_shape, [&](const Array &indices) { return _normalize(exp, expsum, indices); }, name, tag, attrs); } @@ -139,16 +140,16 @@ inline Tensor log_softmax(const Tensor& x, PrimExpr n = x->shape[1]; auto k = tvm::reduce_axis(Range(0, n), "k"); - auto max_elem = tvm::compute( + auto max_elem = tvm::top::compute( { m }, [&](Var i) { return tvm::max(x(i, k), Array{ k }); }); k = tvm::reduce_axis(Range(0, n), "k"); - auto expsum = tvm::compute( + auto expsum = tvm::top::compute( { m }, [&](Var i) { return tvm::sum(tvm::exp(x(i, k) - max_elem(i)), { k }); }); - return tvm::compute( + return tvm::top::compute( x->shape, [&](Var i, Var j) { return x(i, j) - max_elem(i) - tvm::log(expsum(i)); }, name, tag); diff --git a/topi/include/topi/nn/upsampling.h b/topi/include/topi/nn/upsampling.h index b6230c7c017c..f624f3687a40 100644 --- a/topi/include/topi/nn/upsampling.h +++ b/topi/include/topi/nn/upsampling.h @@ -34,6 +34,7 @@ namespace topi { namespace nn { using namespace tvm; +using namespace tvm::top; using namespace topi::image; /*! diff --git a/topi/include/topi/reduction.h b/topi/include/topi/reduction.h index ac843b1a7077..fa405f94b8bc 100644 --- a/topi/include/topi/reduction.h +++ b/topi/include/topi/reduction.h @@ -41,6 +41,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; /*! \brief The operation to use for CommReduce */ using FReduce = std::function& axis)>; @@ -167,7 +168,7 @@ inline Tensor DoCommReduce(const Tensor& data, return func(data(eval_range), r_axes); }; - return tvm::compute(target_shape, compute, data->op->name + "_red", kCommReduce); + return tvm::top::compute(target_shape, compute, data->op->name + "_red", kCommReduce); } /*! 
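The average-pool branches above (pool_impl and pool_impl_nd) use a two-stage idiom: a windowed sum built with a reduction compute, followed by an elementwise rescale. A standalone sketch of that idiom, not part of this patch, with the renamed compute; placeholder's (shape, dtype, name) signature is assumed, and the fixed 2x2 / stride-2 window is purely illustrative.

    // Sketch only: "window sum, then divide" as used by the avg-pool path.
    #include <tvm/operation.h>

    inline tvm::top::Tensor AvgPool2x2Sketch() {
      auto x  = tvm::top::placeholder({1, 8, 32, 32}, tvm::DataType::Float(32), "x");
      auto dh = tvm::reduce_axis(tvm::Range(0, 2), "dh");
      auto dw = tvm::reduce_axis(tvm::Range(0, 2), "dw");
      auto pool_sum = tvm::top::compute(
          {1, 8, 16, 16},
          [&](tvm::Var n, tvm::Var c, tvm::Var h, tvm::Var w) {
            return tvm::sum(x(n, c, h * 2 + dh, w * 2 + dw), {dh, dw});
          }, "pool_sum");
      return tvm::top::compute(
          {1, 8, 16, 16},
          [&](tvm::Var n, tvm::Var c, tvm::Var h, tvm::Var w) {
            return pool_sum(n, c, h, w) / 4.0f;  // divide by the window size
          }, "pool_avg");
    }
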
@@ -251,11 +252,11 @@ inline Tensor CommReduceIdx(const Tensor& data, return func({ idx, data(eval_range) }, reduce_axes, nullptr); }; - auto temp_idx_val = tvm::compute(target_shape, compute, + auto temp_idx_val = tvm::top::compute(target_shape, compute, data->op->name + "_red_temp", kCommReduceIdx); auto temp_idx = temp_idx_val[0]; auto temp_val = temp_idx_val[1]; - return tvm::compute( + return tvm::top::compute( target_shape, [&temp_idx](const Array& indices) { return temp_idx(indices); }, data->op->name + "_red", diff --git a/topi/include/topi/rocm/dense.h b/topi/include/topi/rocm/dense.h index 7eb9f9a51962..b3146db6c964 100644 --- a/topi/include/topi/rocm/dense.h +++ b/topi/include/topi/rocm/dense.h @@ -35,6 +35,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { /*! @@ -48,10 +49,10 @@ namespace rocm { * * \return Tensor with shape [batch, out_dim] */ -inline tvm::Tensor dense_rocm(const Target& target, - const tvm::Tensor& data, - const tvm::Tensor& weight, - const tvm::Tensor& bias, +inline tvm::top::Tensor dense_rocm(const Target& target, + const tvm::top::Tensor& data, + const tvm::top::Tensor& weight, + const tvm::top::Tensor& bias, const DataType& out_dtype) { CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; @@ -67,7 +68,7 @@ inline tvm::Tensor dense_rocm(const Target& target, CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::rocblas_matmul(data, weight, false, true); if (bias.defined()) { - mm = tvm::compute({ batch, out_dim }, + mm = tvm::top::compute({ batch, out_dim }, [&](Var i, Var j) { return mm(i, j) + bias(j); }, "tensor", kBroadcast); diff --git a/topi/include/topi/rocm/injective.h b/topi/include/topi/rocm/injective.h index 7b367b713584..b521ff0cabb6 100644 --- a/topi/include/topi/rocm/injective.h +++ b/topi/include/topi/rocm/injective.h @@ -33,6 +33,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { @@ -41,7 +42,7 @@ namespace rocm { * * \param sch The schedule to update. * \param out The tensor representing the injective op. - * + * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { diff --git a/topi/include/topi/rocm/normalization.h b/topi/include/topi/rocm/normalization.h index c6ffd328528a..ed33b69fce1f 100644 --- a/topi/include/topi/rocm/normalization.h +++ b/topi/include/topi/rocm/normalization.h @@ -30,6 +30,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { /*! * \brief Create a rocm schedule for LRN diff --git a/topi/include/topi/rocm/pooling.h b/topi/include/topi/rocm/pooling.h index b0ab7cd40650..98ae6d80902a 100644 --- a/topi/include/topi/rocm/pooling.h +++ b/topi/include/topi/rocm/pooling.h @@ -34,6 +34,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { diff --git a/topi/include/topi/rocm/reduction.h b/topi/include/topi/rocm/reduction.h index 22e7c7d48d34..89be35340535 100644 --- a/topi/include/topi/rocm/reduction.h +++ b/topi/include/topi/rocm/reduction.h @@ -33,6 +33,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { /*! 
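dense_rocm above adds the bias with a second tvm::top::compute over the rocBLAS matmul result. For comparison, a sketch, not part of this patch, of the plain topi::nn::dense path shown earlier in this series, with types written out; placeholder's (shape, dtype, name) signature is assumed, and shapes follow the [batch, in_dim] x [out_dim, in_dim] convention from dense.h.

    // Sketch only: generic dense with bias through the renamed types.
    #include <topi/nn/dense.h>

    inline tvm::top::Tensor DenseSketch() {
      auto data   = tvm::top::placeholder({16, 128}, tvm::DataType::Float(32), "data");
      auto weight = tvm::top::placeholder({64, 128}, tvm::DataType::Float(32), "weight");
      auto bias   = tvm::top::placeholder({64}, tvm::DataType::Float(32), "bias");
      return topi::nn::dense(data, weight, bias, tvm::DataType::Float(32));
    }
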
diff --git a/topi/include/topi/rocm/softmax.h b/topi/include/topi/rocm/softmax.h index fbc67eaf9f2d..4c6785b062ba 100644 --- a/topi/include/topi/rocm/softmax.h +++ b/topi/include/topi/rocm/softmax.h @@ -33,6 +33,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace rocm { diff --git a/topi/include/topi/transform.h b/topi/include/topi/transform.h index 66e2773ded7e..7df17a1595f9 100644 --- a/topi/include/topi/transform.h +++ b/topi/include/topi/transform.h @@ -41,6 +41,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; using namespace topi::detail; /*! @@ -1041,8 +1042,8 @@ inline Tensor gather_nd(const Tensor& data, * * \return A Tensor whose op member is the matmul operation */ -inline tvm::Tensor matmul(const tvm::Tensor& A, - const tvm::Tensor& B, +inline tvm::top::Tensor matmul(const tvm::top::Tensor& A, + const tvm::top::Tensor& B, bool trans_a = false, bool trans_b = false, std::string name = "T_matmul", @@ -1054,7 +1055,7 @@ inline tvm::Tensor matmul(const tvm::Tensor& A, return tvm::sum((trans_a ? A[k][i] : A[i][k]) * (trans_b ? B[j][k] : B[k][j]), {k}); }; - return tvm::compute(output_shape, l, name, tag); + return tvm::top::compute(output_shape, l, name, tag); } /*! @@ -1069,7 +1070,7 @@ inline tvm::Tensor matmul(const tvm::Tensor& A, * \return A Tensor computing the result */ inline Tensor tensordot(const Tensor& A, - const tvm::Tensor& B, + const tvm::top::Tensor& B, int axes = 2, std::string name = "T_tensordot", std::string tag = kMatMul) { @@ -1124,7 +1125,7 @@ inline Tensor tensordot(const Tensor& A, * \return A Tensor computing the result */ inline Tensor tensordot(const Tensor& A, - const tvm::Tensor& B, + const tvm::top::Tensor& B, Array A_axes, Array B_axes, std::string name = "T_tensordot", diff --git a/topi/include/topi/vision/reorg.h b/topi/include/topi/vision/reorg.h index df3fadef7d75..bf01704f36e8 100644 --- a/topi/include/topi/vision/reorg.h +++ b/topi/include/topi/vision/reorg.h @@ -37,6 +37,7 @@ namespace topi { namespace vision { using namespace tvm; +using namespace tvm::top; /*! * \brief Reorg operation @@ -60,7 +61,7 @@ inline Tensor reorg(const Tensor &data, int w_in = GetConstInt(input_shape[3]); int out_c = c_in / (stride * stride); - auto out = tvm::compute(input_shape, + auto out = tvm::top::compute(input_shape, [&](Var b, Var k, Var j, Var i) { return data(b * stride * stride, indexmod(k, out_c) * stride * stride, diff --git a/topi/include/topi/x86/bnn.h b/topi/include/topi/x86/bnn.h index 1158749520c5..66c02e461fe9 100644 --- a/topi/include/topi/x86/bnn.h +++ b/topi/include/topi/x86/bnn.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace x86 { /*! diff --git a/topi/include/topi/x86/default.h b/topi/include/topi/x86/default.h index 294871b535df..921d1474569c 100644 --- a/topi/include/topi/x86/default.h +++ b/topi/include/topi/x86/default.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace x86 { /*! 
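matmul and tensordot above keep their argument order and only rename the tensor and compute spellings. A one-function sketch of the transposed case, not part of this patch; placeholder's (shape, dtype, name) signature is assumed.

    // Sketch only: C = A x B^T via topi::matmul, result shape {32, 48}.
    #include <topi/transform.h>

    inline tvm::top::Tensor MatmulSketch() {
      auto A = tvm::top::placeholder({32, 64}, tvm::DataType::Float(32), "A");
      auto B = tvm::top::placeholder({48, 64}, tvm::DataType::Float(32), "B");
      return topi::matmul(A, B, /*trans_a=*/false, /*trans_b=*/true);
    }
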
@@ -54,7 +55,7 @@ inline Schedule MakeDefaultSchedule(const Target &target, auto axis = s[x]->op.as()->axis; if (auto_inline) { - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); if (axis.size() > 0) { detail::Fuse(s[x], axis); } diff --git a/topi/include/topi/x86/injective.h b/topi/include/topi/x86/injective.h index 7cb79ae322c2..c0a28222ceea 100644 --- a/topi/include/topi/x86/injective.h +++ b/topi/include/topi/x86/injective.h @@ -31,6 +31,7 @@ namespace topi { using namespace tvm; +using namespace tvm::top; namespace x86 { @@ -39,7 +40,7 @@ namespace x86 { * * \param sch The schedule to update. * \param out The tensor representing the injective op. - * + * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { @@ -69,7 +70,7 @@ inline Schedule schedule_injective(const Target &target, const Array& ou out_ops.push_back(t->op); } auto s = create_schedule(out_ops); - tvm::schedule::AutoInlineInjective(s); + tvm::top::AutoInlineInjective(s); auto x = outs[0]; schedule_injective_from_existing(s, x); diff --git a/topi/src/topi.cc b/topi/src/topi.cc index 21e61cfa4816..8197e891d154 100644 --- a/topi/src/topi.cc +++ b/topi/src/topi.cc @@ -93,7 +93,7 @@ Array ArrayOrInt(TVMArgValue arg) { inline bool IsTensorType(TVMArgValue arg) { return (arg.type_code() == kTVMObjectHandle && static_cast( - arg.value().v_handle)->IsInstance()); + arg.value().v_handle)->IsInstance()); } @@ -109,13 +109,13 @@ TVM_REGISTER_GLOBAL("topi.TEST_create_target") bool lhs_is_tensor = IsTensorType(args[0]); \ bool rhs_is_tensor = IsTensorType(args[1]); \ if (lhs_is_tensor && rhs_is_tensor) { \ - *rv = Op(args[0].operator tvm::Tensor(), \ - args[1].operator tvm::Tensor()); \ + *rv = Op(args[0].operator tvm::top::Tensor(), \ + args[1].operator tvm::top::Tensor()); \ } else if (!lhs_is_tensor && rhs_is_tensor) { \ *rv = Op(args[0].operator tvm::PrimExpr(), \ - args[1].operator tvm::Tensor()); \ + args[1].operator tvm::top::Tensor()); \ } else if (lhs_is_tensor && !rhs_is_tensor) { \ - *rv = Op(args[0].operator tvm::Tensor(), \ + *rv = Op(args[0].operator tvm::top::Tensor(), \ args[1].operator tvm::PrimExpr()); \ } else if (!lhs_is_tensor && !rhs_is_tensor) { \ *rv = Op(args[0].operator tvm::PrimExpr(), \ @@ -757,7 +757,7 @@ TVM_REGISTER_GLOBAL("topi.util.is_empty_shape") /*! \brief Builder function for instantiating schedules. */ using FTVMScheduleBuilder = std::function< - tvm::Schedule(const tvm::Target& target, const tvm::Array& outs)>; + tvm::top::Schedule(const tvm::Target& target, const tvm::Array& outs)>; /*! * \brief Helper function for registering generic functions matching the @@ -826,7 +826,7 @@ TVM_REGISTER_GENERIC_FUNC(schedule_binary_dense) /*! \brief Builder function for instantiating schedules from existing schedules. */ using FTVMScheduleFromExistingBuilder = std::function< - tvm::Schedule(tvm::Schedule sch, const tvm::Tensor& out)>; + tvm::top::Schedule(tvm::top::Schedule sch, const tvm::top::Tensor& out)>; /*! * \brief Helper function for registering generic functions matching the @@ -850,10 +850,10 @@ TVM_REGISTER_GENERIC_FUNC(schedule_injective_from_existing) topi::cuda::schedule_injective_from_existing)); /*! \brief Builder function for instantiating dense ops. */ -using FTVMDenseOpBuilder = std::function; /*! 
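The FTVMScheduleBuilder and FTVMScheduleFromExistingBuilder aliases above now traffic in tvm::top::Schedule and tvm::top::Tensor. A sketch, not part of this patch, of a builder that satisfies the renamed FTVMScheduleBuilder; the Array element types are written out here as an assumption, NaiveScheduleBuilder is a hypothetical name, and the header paths are the pre-move ones from the first patch.

    // Sketch only: a pass-through builder for the renamed schedule typedef.
    #include <tvm/build_module.h>
    #include <tvm/operation.h>

    tvm::top::Schedule NaiveScheduleBuilder(const tvm::Target& target,
                                            const tvm::Array<tvm::top::Tensor>& outs) {
      tvm::Array<tvm::top::Operation> out_ops;
      for (auto t : outs) out_ops.push_back(t->op);
      return tvm::top::create_schedule(out_ops);
    }
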
@@ -879,9 +879,9 @@ inline PackedFunc WrapDenseOp(FTVMDenseOpBuilder builder) { TVM_REGISTER_GENERIC_FUNC(dense) .set_default(WrapDenseOp([](const Target& target, - const tvm::Tensor& data, - const tvm::Tensor& weight, - const tvm::Tensor& bias, + const tvm::top::Tensor& data, + const tvm::top::Tensor& weight, + const tvm::top::Tensor& bias, const DataType& out_dtype) { return topi::nn::dense(data, weight, bias, out_dtype); })) From 4a55c90bd27d8c8636cc16ae213439020b8ca6fc Mon Sep 17 00:00:00 2001 From: tqchen Date: Thu, 16 Jan 2020 11:51:19 -0800 Subject: [PATCH 2/2] Move relevant files into include/tvm/top and src/top --- CMakeLists.txt | 14 +++++++---- include/tvm/arith/bound.h | 5 +++- include/tvm/build_module.h | 4 +++- include/tvm/ir_pass.h | 3 ++- include/tvm/lowered_func.h | 3 ++- include/tvm/packed_func_ext.h | 3 ++- include/tvm/relay/op_attr_types.h | 4 ++-- include/tvm/{ => top}/operation.h | 23 +++++++++++-------- include/tvm/{ => top}/schedule.h | 17 ++++++++------ include/tvm/{ => top}/schedule_pass.h | 10 ++++---- include/tvm/{ => top}/tensor.h | 13 ++++++----- include/tvm/{ => top}/tensor_intrin.h | 14 ++++++----- src/README.md | 9 ++++---- src/api/api_arith.cc | 2 +- src/api/api_base.cc | 2 +- src/api/api_lang.cc | 6 ++--- src/api/api_schedule.cc | 8 +++---- src/api/api_test.cc | 2 +- src/arith/domain_touched.cc | 7 ++++-- src/codegen/build_module.cc | 2 +- src/contrib/hybrid/codegen_hybrid.h | 2 +- src/pass/storage_flatten.cc | 2 +- src/pass/tensor_core.cc | 2 +- src/pass/verify_compact_buffer.cc | 2 +- src/relay/backend/compile_engine.cc | 4 ++-- src/relay/backend/utils.h | 2 +- src/relay/backend/vm/compiler.cc | 2 +- src/relay/op/tensor/transform.cc | 3 ++- src/relay/pass/alter_op_layout.cc | 2 +- src/relay/pass/convert_layout.cc | 2 +- src/relay/pass/gradient.cc | 2 +- src/relay/pass/legalize.cc | 2 +- src/{op => top/operation}/compute_op.cc | 6 ++--- src/{op => top/operation}/compute_op.h | 8 +++---- .../operation}/cross_thread_reduction.cc | 0 src/{op => top/operation}/extern_op.cc | 2 +- src/{op => top/operation}/hybrid_op.cc | 2 +- src/{op => top/operation}/hybrid_op.h | 15 ++++++------ src/{op => top/operation}/op_util.cc | 4 ++-- src/{op => top/operation}/op_util.h | 12 +++++----- src/{op => top/operation}/placeholder_op.cc | 2 +- src/{op => top/operation}/scan_op.cc | 2 +- .../operation}/tensor_compute_op.cc | 4 ++-- src/{op => top/operation}/tensorize.cc | 0 .../schedule/auto_inline_elem_wise.cc | 4 ++-- src/{ => top}/schedule/bound.cc | 6 ++--- src/{ => top}/schedule/graph.cc | 2 +- src/{ => top}/schedule/graph.h | 10 ++++---- src/{ => top}/schedule/message_passing.cc | 2 +- src/{ => top}/schedule/message_passing.h | 10 ++++---- .../schedule/schedule_dataflow_rewrite.cc | 8 +++---- src/{ => top}/schedule/schedule_lang.cc | 4 ++-- src/{ => top}/schedule/schedule_ops.cc | 8 +++---- src/{lang => top}/tensor.cc | 6 ++--- tests/cpp/build_module_test.cc | 2 +- tests/cpp/expr_test.cc | 2 +- tests/cpp/ir_simplify_test.cc | 2 +- tests/cpp/relay_build_module_test.cc | 2 +- tests/cpp/relay_pass_type_infer_test.cc | 2 +- tests/cpp/relay_transform_sequential.cc | 2 +- tests/cpp/simple_passes_test.cc | 2 +- tests/cpp/tensor_test.cc | 2 +- tests/cpp/topi_ewise_test.cc | 2 +- tests/cpp/utvm_runtime_standalone_test.cc | 2 +- topi/include/topi/contrib/cublas.h | 2 +- topi/include/topi/contrib/rocblas.h | 2 +- topi/include/topi/cuda/dense.h | 2 +- topi/include/topi/cuda/injective.h | 2 +- topi/include/topi/cuda/normalization.h | 2 +- topi/include/topi/cuda/pooling.h | 2 +- 
topi/include/topi/cuda/reduction.h | 2 +- topi/include/topi/cuda/softmax.h | 2 +- topi/include/topi/detail/array_utils.h | 2 +- topi/include/topi/detail/broadcast.h | 2 +- topi/include/topi/detail/extern.h | 2 +- topi/include/topi/detail/fuse.h | 2 +- topi/include/topi/detail/ravel_unravel.h | 2 +- topi/include/topi/generic/default.h | 2 +- topi/include/topi/generic/extern.h | 2 +- topi/include/topi/generic/injective.h | 2 +- topi/include/topi/image/resize.h | 2 +- topi/include/topi/nn.h | 2 +- topi/include/topi/nn/batch_matmul.h | 2 +- topi/include/topi/nn/bias_add.h | 2 +- topi/include/topi/nn/bnn.h | 2 +- topi/include/topi/nn/dense.h | 2 +- topi/include/topi/nn/dilate.h | 2 +- topi/include/topi/nn/flatten.h | 2 +- topi/include/topi/nn/l2_normalize.h | 2 +- topi/include/topi/nn/local_response_norm.h | 2 +- topi/include/topi/nn/mapping.h | 2 +- topi/include/topi/nn/softmax.h | 2 +- topi/include/topi/reduction.h | 2 +- topi/include/topi/rocm/dense.h | 2 +- topi/include/topi/rocm/injective.h | 2 +- topi/include/topi/rocm/normalization.h | 2 +- topi/include/topi/rocm/pooling.h | 2 +- topi/include/topi/rocm/reduction.h | 2 +- topi/include/topi/rocm/softmax.h | 2 +- topi/include/topi/transform.h | 2 +- topi/include/topi/vision/reorg.h | 2 +- topi/include/topi/x86/bnn.h | 2 +- topi/include/topi/x86/default.h | 2 +- topi/include/topi/x86/injective.h | 2 +- 104 files changed, 210 insertions(+), 185 deletions(-) rename include/tvm/{ => top}/operation.h (98%) rename include/tvm/{ => top}/schedule.h (99%) rename include/tvm/{ => top}/schedule_pass.h (93%) rename include/tvm/{ => top}/tensor.h (98%) rename include/tvm/{ => top}/tensor_intrin.h (96%) rename src/{op => top/operation}/compute_op.cc (99%) rename src/{op => top/operation}/compute_op.h (96%) rename src/{op => top/operation}/cross_thread_reduction.cc (100%) rename src/{op => top/operation}/extern_op.cc (99%) rename src/{op => top/operation}/hybrid_op.cc (99%) rename src/{op => top/operation}/hybrid_op.h (93%) rename src/{op => top/operation}/op_util.cc (99%) rename src/{op => top/operation}/op_util.h (94%) rename src/{op => top/operation}/placeholder_op.cc (98%) rename src/{op => top/operation}/scan_op.cc (99%) rename src/{op => top/operation}/tensor_compute_op.cc (99%) rename src/{op => top/operation}/tensorize.cc (100%) rename src/{ => top}/schedule/auto_inline_elem_wise.cc (97%) rename src/{ => top}/schedule/bound.cc (98%) rename src/{ => top}/schedule/graph.cc (99%) rename src/{ => top}/schedule/graph.h (95%) rename src/{ => top}/schedule/message_passing.cc (99%) rename src/{ => top}/schedule/message_passing.h (95%) rename src/{ => top}/schedule/schedule_dataflow_rewrite.cc (99%) rename src/{ => top}/schedule/schedule_lang.cc (99%) rename src/{ => top}/schedule/schedule_ops.cc (99%) rename src/{lang => top}/tensor.cc (97%) diff --git a/CMakeLists.txt b/CMakeLists.txt index f48e7d18d094..a62173e13e78 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -124,20 +124,24 @@ assign_source_group("Source" ${GROUP_SOURCE}) assign_source_group("Include" ${GROUP_INCLUDE}) # Source file lists -file(GLOB COMPILER_SRCS +file(GLOB_RECURSE COMPILER_SRCS src/node/*.cc src/ir/*.cc src/target/*.cc - src/api/*.cc src/arith/*.cc + src/top/*.cc + src/api/*.cc src/autotvm/*.cc - src/codegen/*.cc src/lang/*.cc src/pass/*.cc - src/op/*.cc - src/schedule/*.cc ) +file(GLOB CODEGEN_SRCS + src/codegen/*.cc + ) + +list(APPEND COMPILER_SRCS ${CODEGEN_SRCS}) + file(GLOB_RECURSE RELAY_OP_SRCS src/relay/op/*.cc ) diff --git a/include/tvm/arith/bound.h 
b/include/tvm/arith/bound.h index dcc4364e256b..e06954816148 100644 --- a/include/tvm/arith/bound.h +++ b/include/tvm/arith/bound.h @@ -77,7 +77,10 @@ IntSet DeduceBound(PrimExpr v, PrimExpr cond, * \param consider_provides If provides (write) are considered. * \return The domain that covers all the calls or provides within the given statement. */ -Domain DomainTouched(Stmt body, const top::Tensor &tensor, bool consider_calls, bool consider_provides); +Domain DomainTouched(Stmt body, + const top::Tensor &tensor, + bool consider_calls, + bool consider_provides); } // namespace arith } // namespace tvm diff --git a/include/tvm/build_module.h b/include/tvm/build_module.h index 7a09bdcadaf8..4e6e51744b6c 100644 --- a/include/tvm/build_module.h +++ b/include/tvm/build_module.h @@ -26,6 +26,8 @@ #include #include +#include + #include #include #include @@ -33,7 +35,7 @@ #include #include "runtime/packed_func.h" -#include "schedule_pass.h" + #include "lowered_func.h" namespace tvm { diff --git a/include/tvm/ir_pass.h b/include/tvm/ir_pass.h index 4b9638cd221d..bf444265b078 100644 --- a/include/tvm/ir_pass.h +++ b/include/tvm/ir_pass.h @@ -27,13 +27,14 @@ #ifndef TVM_IR_PASS_H_ #define TVM_IR_PASS_H_ +#include + #include #include #include #include #include "expr.h" #include "buffer.h" -#include "schedule.h" #include "lowered_func.h" namespace tvm { diff --git a/include/tvm/lowered_func.h b/include/tvm/lowered_func.h index 2b643d75f08b..b0350ae8b4dc 100644 --- a/include/tvm/lowered_func.h +++ b/include/tvm/lowered_func.h @@ -25,10 +25,11 @@ #ifndef TVM_LOWERED_FUNC_H_ #define TVM_LOWERED_FUNC_H_ +#include + #include #include "expr.h" -#include "tensor.h" #include "tvm/node/container.h" namespace tvm { diff --git a/include/tvm/packed_func_ext.h b/include/tvm/packed_func_ext.h index 32364e41629c..f7b0d0820980 100644 --- a/include/tvm/packed_func_ext.h +++ b/include/tvm/packed_func_ext.h @@ -25,13 +25,14 @@ #ifndef TVM_PACKED_FUNC_EXT_H_ #define TVM_PACKED_FUNC_EXT_H_ +#include + #include #include #include #include #include "expr.h" -#include "tensor.h" #include "runtime/packed_func.h" namespace tvm { diff --git a/include/tvm/relay/op_attr_types.h b/include/tvm/relay/op_attr_types.h index 660ddc9d0eae..385d6453fae9 100644 --- a/include/tvm/relay/op_attr_types.h +++ b/include/tvm/relay/op_attr_types.h @@ -24,8 +24,8 @@ #ifndef TVM_RELAY_OP_ATTR_TYPES_H_ #define TVM_RELAY_OP_ATTR_TYPES_H_ -#include -#include +#include +#include #include #include #include diff --git a/include/tvm/operation.h b/include/tvm/top/operation.h similarity index 98% rename from include/tvm/operation.h rename to include/tvm/top/operation.h index 34b8a621cce3..2cee21873687 100644 --- a/include/tvm/operation.h +++ b/include/tvm/top/operation.h @@ -18,26 +18,29 @@ */ /*! - * \file tvm/operation.h + * \file tvm/top/operation.h * \brief Operation node can generate one or multiple Tensors */ -#ifndef TVM_OPERATION_H_ -#define TVM_OPERATION_H_ +#ifndef TVM_TOP_OPERATION_H_ +#define TVM_TOP_OPERATION_H_ #include +#include +#include + +#include +#include +#include #include #include #include -#include "expr.h" -#include "expr_operator.h" -#include "tensor.h" -#include "schedule.h" -#include "buffer.h" + namespace tvm { namespace top { + using arith::IntSet; /*! 
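The second patch is purely a relocation: headers move under include/tvm/top/, include guards are renamed to match, and downstream files update their include lines while keeping the tvm::top spellings introduced by the first patch. A sketch, not part of this patch, of a translation unit updated for the new locations; MakeIota is a hypothetical helper, and the paths are taken from the renames listed above.

    // Sketch only: include paths after the move to include/tvm/top/.
    #include <tvm/top/operation.h>   // was <tvm/operation.h>
    #include <tvm/top/tensor.h>      // was <tvm/tensor.h>

    inline tvm::top::Tensor MakeIota(int n) {
      return tvm::top::compute(
          {n},
          [](tvm::Var i) { return tvm::cast(tvm::DataType::Float(32), i); },
          "iota");
    }
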
@@ -655,6 +658,6 @@ inline Tensor compute(Array shape, inline const OperationNode* Operation::operator->() const { return static_cast(get()); } -} // namspace top +} // namespace top } // namespace tvm -#endif // TVM_OPERATION_H_ +#endif // TVM_TOP_OPERATION_H_ diff --git a/include/tvm/schedule.h b/include/tvm/top/schedule.h similarity index 99% rename from include/tvm/schedule.h rename to include/tvm/top/schedule.h index 7444076c06bc..2adaa1337812 100644 --- a/include/tvm/schedule.h +++ b/include/tvm/top/schedule.h @@ -18,18 +18,21 @@ */ /*! - * \file tvm/schedule.h + * \file tvm/top/schedule.h * \brief Define a schedule. */ // Acknowledgement: Many schedule primitives originate from Halide and Loopy. -#ifndef TVM_SCHEDULE_H_ -#define TVM_SCHEDULE_H_ +#ifndef TVM_TOP_SCHEDULE_H_ +#define TVM_TOP_SCHEDULE_H_ + +#include +#include +#include + #include #include -#include "expr.h" -#include "tensor.h" -#include "tensor_intrin.h" + namespace tvm { namespace top { @@ -766,4 +769,4 @@ inline const IterVarAttrNode* IterVarAttr::operator->() const { } } // namespace top } // namespace tvm -#endif // TVM_SCHEDULE_H_ +#endif // TVM_TOP_SCHEDULE_H_ diff --git a/include/tvm/schedule_pass.h b/include/tvm/top/schedule_pass.h similarity index 93% rename from include/tvm/schedule_pass.h rename to include/tvm/top/schedule_pass.h index de433319086f..eacc9cdcb953 100644 --- a/include/tvm/schedule_pass.h +++ b/include/tvm/top/schedule_pass.h @@ -18,17 +18,17 @@ */ /*! - * \file tvm/schedule_pass.h + * \file tvm/top/schedule_pass.h * \brief Collection of Schedule pass functions. * * These passes works on the schedule hyper-graph * and infers information such as bounds, check conditions * read/write dependencies between the IterVar */ -#ifndef TVM_SCHEDULE_PASS_H_ -#define TVM_SCHEDULE_PASS_H_ +#ifndef TVM_TOP_SCHEDULE_PASS_H_ +#define TVM_TOP_SCHEDULE_PASS_H_ -#include "schedule.h" +#include namespace tvm { namespace top { @@ -73,4 +73,4 @@ TVM_DLL void AutoInlineInjective(Schedule sch); } // namespace top } // namespace tvm -#endif // TVM_SCHEDULE_PASS_H_ +#endif // TVM_TOP_SCHEDULE_PASS_H_ diff --git a/include/tvm/tensor.h b/include/tvm/top/tensor.h similarity index 98% rename from include/tvm/tensor.h rename to include/tvm/top/tensor.h index d8aeba7bf504..bdfbbebabd6b 100644 --- a/include/tvm/tensor.h +++ b/include/tvm/top/tensor.h @@ -18,22 +18,23 @@ */ /*! - * \file tvm/tensor.h + * \file tvm/top/tensor.h * \brief Dataflow tensor object */ -#ifndef TVM_TENSOR_H_ -#define TVM_TENSOR_H_ +#ifndef TVM_TOP_TENSOR_H_ +#define TVM_TOP_TENSOR_H_ #include #include +#include +#include #include #include #include #include -#include "expr.h" -#include "expr_operator.h" + namespace tvm { namespace top { @@ -267,4 +268,4 @@ struct hash<::tvm::top::Tensor> { } }; } // namespace std -#endif // TVM_TENSOR_H_ +#endif // TVM_TOP_TENSOR_H_ diff --git a/include/tvm/tensor_intrin.h b/include/tvm/top/tensor_intrin.h similarity index 96% rename from include/tvm/tensor_intrin.h rename to include/tvm/top/tensor_intrin.h index 6367f07fe305..99eb8852f0a2 100644 --- a/include/tvm/tensor_intrin.h +++ b/include/tvm/top/tensor_intrin.h @@ -18,15 +18,17 @@ */ /*! - * \file tvm/tensor_intrin.h + * \file tvm/top/tensor_intrin.h * \brief Tensor intrinsic operations. 
*/ -#ifndef TVM_TENSOR_INTRIN_H_ -#define TVM_TENSOR_INTRIN_H_ +#ifndef TVM_TOP_TENSOR_INTRIN_H_ +#define TVM_TOP_TENSOR_INTRIN_H_ + +#include +#include #include -#include "tensor.h" -#include "buffer.h" + namespace tvm { namespace top { @@ -176,4 +178,4 @@ inline const TensorIntrinCallNode* TensorIntrinCall::operator->() const { } // namespace top } // namespace tvm -#endif // TVM_TENSOR_INTRIN_H_ +#endif // TVM_TOP_TENSOR_INTRIN_H_ diff --git a/src/README.md b/src/README.md index b0172b665eb8..2de81416cdb0 100644 --- a/src/README.md +++ b/src/README.md @@ -24,13 +24,12 @@ There can be internal header files within each module that sit in src. - support: Internal support utilities. - runtime: Minimum runtime related codes. - node: base infra for IR/AST nodes that is dialect independent. -- api: API function registration. -- lang: The definition of DSL related data structure. - arith: Arithmetic expression and set simplification. -- op: The detail implementations about each operation(compute, scan, placeholder). -- schedule: The operations on the schedule graph before converting to IR. +- top: tensor operation DSL for compute and schedule. +- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks. - pass: The optimization pass on the IR structure. - codegen: The code generator. - autotvm: The auto-tuning module. -- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks. - contrib: Contrib extension libraries. +- api: API function registration. +- lang: The definition of DSL related data structure. diff --git a/src/api/api_arith.cc b/src/api/api_arith.cc index b4020eb944ea..0c28d08a7f28 100644 --- a/src/api/api_arith.cc +++ b/src/api/api_arith.cc @@ -31,7 +31,7 @@ #include #include -#include +#include namespace tvm { namespace arith { diff --git a/src/api/api_base.cc b/src/api/api_base.cc index 131ac363bb8b..4b74d02cdf9e 100644 --- a/src/api/api_base.cc +++ b/src/api/api_base.cc @@ -23,7 +23,7 @@ */ #include #include -#include +#include #include #include diff --git a/src/api/api_lang.cc b/src/api/api_lang.cc index 447254840409..89c2c5310507 100644 --- a/src/api/api_lang.cc +++ b/src/api/api_lang.cc @@ -23,10 +23,10 @@ */ #include #include -#include -#include +#include +#include #include -#include +#include #include #include diff --git a/src/api/api_schedule.cc b/src/api/api_schedule.cc index 1782920b22a4..7aa305fe0f4a 100644 --- a/src/api/api_schedule.cc +++ b/src/api/api_schedule.cc @@ -22,13 +22,13 @@ * \file api_schedule.cc */ #include -#include -#include -#include +#include +#include +#include #include #include -#include "../schedule/graph.h" +#include "../top/schedule/graph.h" namespace tvm { namespace top { diff --git a/src/api/api_test.cc b/src/api/api_test.cc index 0bc83ea148a4..957a034d427f 100644 --- a/src/api/api_test.cc +++ b/src/api/api_test.cc @@ -22,7 +22,7 @@ * \file api_test.cc */ #include -#include +#include #include #include #include diff --git a/src/arith/domain_touched.cc b/src/arith/domain_touched.cc index b1da178d6a42..6e665c8bb72b 100644 --- a/src/arith/domain_touched.cc +++ b/src/arith/domain_touched.cc @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -114,7 +114,10 @@ class FuncTouchedDomain final : public StmtExprVisitor { std::unordered_map dom_map_; }; -Domain DomainTouched(Stmt stmt, const top::Tensor &tensor, bool consider_calls, bool consider_provides) { +Domain DomainTouched(Stmt stmt, + const top::Tensor &tensor, + bool 
consider_calls, + bool consider_provides) { return FuncTouchedDomain(tensor, consider_calls, consider_provides).Find(stmt); } diff --git a/src/codegen/build_module.cc b/src/codegen/build_module.cc index 06f8c139008b..cfb75c4e68ef 100644 --- a/src/codegen/build_module.cc +++ b/src/codegen/build_module.cc @@ -23,7 +23,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/src/contrib/hybrid/codegen_hybrid.h b/src/contrib/hybrid/codegen_hybrid.h index 66f8ee923bce..f5ba9abf1244 100644 --- a/src/contrib/hybrid/codegen_hybrid.h +++ b/src/contrib/hybrid/codegen_hybrid.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/pass/storage_flatten.cc b/src/pass/storage_flatten.cc index 81223351a99c..b5067658c993 100644 --- a/src/pass/storage_flatten.cc +++ b/src/pass/storage_flatten.cc @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/pass/tensor_core.cc b/src/pass/tensor_core.cc index 5e92f909b78b..bf36b0a8ffdb 100644 --- a/src/pass/tensor_core.cc +++ b/src/pass/tensor_core.cc @@ -23,7 +23,7 @@ // IR Passes for TensorCore CodeGen #include #include -#include +#include #include #include #include diff --git a/src/pass/verify_compact_buffer.cc b/src/pass/verify_compact_buffer.cc index f6c454d2abf3..95dcbddedddc 100644 --- a/src/pass/verify_compact_buffer.cc +++ b/src/pass/verify_compact_buffer.cc @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index 3089281bac88..14967c15490f 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -23,9 +23,9 @@ */ #include "compile_engine.h" -#include +#include #include -#include +#include #include #include #include diff --git a/src/relay/backend/utils.h b/src/relay/backend/utils.h index 3ef740306071..79583685a6a2 100644 --- a/src/relay/backend/utils.h +++ b/src/relay/backend/utils.h @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc index e4a34a3974ae..00e47bcc9c5e 100644 --- a/src/relay/backend/vm/compiler.cc +++ b/src/relay/backend/vm/compiler.cc @@ -22,7 +22,7 @@ * \brief A compiler from relay::Module to the VM byte code. 
*/ -#include +#include #include #include #include diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index b11ec72c570c..c2af56d68d46 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -2563,7 +2563,8 @@ Array SequenceMaskCompute(const Attrs& attrs, const Target& target) { const auto* param = attrs.as(); CHECK(param != nullptr); - return Array{ topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) }; + return Array{ + topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) }; } Expr MakeSequenceMask(Expr data, diff --git a/src/relay/pass/alter_op_layout.cc b/src/relay/pass/alter_op_layout.cc index efc7eb57e082..974618209931 100644 --- a/src/relay/pass/alter_op_layout.cc +++ b/src/relay/pass/alter_op_layout.cc @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/relay/pass/convert_layout.cc b/src/relay/pass/convert_layout.cc index 18a56a4a62e4..d435efd249d3 100644 --- a/src/relay/pass/convert_layout.cc +++ b/src/relay/pass/convert_layout.cc @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/relay/pass/gradient.cc b/src/relay/pass/gradient.cc index e236de72be54..8d89c0a53d3a 100644 --- a/src/relay/pass/gradient.cc +++ b/src/relay/pass/gradient.cc @@ -23,7 +23,7 @@ */ #include -#include +#include #include #include #include diff --git a/src/relay/pass/legalize.cc b/src/relay/pass/legalize.cc index 63608084e3f8..12e72cf1fad2 100644 --- a/src/relay/pass/legalize.cc +++ b/src/relay/pass/legalize.cc @@ -23,7 +23,7 @@ * shape, dtype or layout to another op or a sequence of ops. */ -#include +#include #include #include #include diff --git a/src/op/compute_op.cc b/src/top/operation/compute_op.cc similarity index 99% rename from src/op/compute_op.cc rename to src/top/operation/compute_op.cc index b2d6d74f3e77..a8c232824346 100644 --- a/src/op/compute_op.cc +++ b/src/top/operation/compute_op.cc @@ -21,7 +21,7 @@ * \brief Compute Op. * \file compute_op.cc */ -#include +#include #include #include #include @@ -32,8 +32,8 @@ #include "compute_op.h" #include "op_util.h" #include "../schedule/message_passing.h" -#include "../arith/compute_expr.h" -#include "../arith/interval_set.h" +#include "../../arith/compute_expr.h" +#include "../../arith/interval_set.h" namespace tvm { namespace top { diff --git a/src/op/compute_op.h b/src/top/operation/compute_op.h similarity index 96% rename from src/op/compute_op.h rename to src/top/operation/compute_op.h index 3f9a98ab0be0..093dd221d8eb 100644 --- a/src/op/compute_op.h +++ b/src/top/operation/compute_op.h @@ -21,12 +21,12 @@ * \brief Helper utilities to implement compute_op. 
* \file compute_op.h */ -#ifndef TVM_OP_COMPUTE_OP_H_ -#define TVM_OP_COMPUTE_OP_H_ +#ifndef TVM_TOP_OPERATION_COMPUTE_OP_H_ +#define TVM_TOP_OPERATION_COMPUTE_OP_H_ #include #include -#include +#include #include #include @@ -110,4 +110,4 @@ Stmt TransformUpdate(const Stage& stage, } // namespace top } // namespace tvm -#endif // TVM_OP_COMPUTE_OP_H_ +#endif // TVM_TOP_OPERATION_COMPUTE_OP_H_ diff --git a/src/op/cross_thread_reduction.cc b/src/top/operation/cross_thread_reduction.cc similarity index 100% rename from src/op/cross_thread_reduction.cc rename to src/top/operation/cross_thread_reduction.cc diff --git a/src/op/extern_op.cc b/src/top/operation/extern_op.cc similarity index 99% rename from src/op/extern_op.cc rename to src/top/operation/extern_op.cc index 56443a5c335a..3fc73dc38b99 100644 --- a/src/op/extern_op.cc +++ b/src/top/operation/extern_op.cc @@ -21,7 +21,7 @@ * \brief External computation rule. * \file extern_op.cc */ -#include +#include #include #include #include diff --git a/src/op/hybrid_op.cc b/src/top/operation/hybrid_op.cc similarity index 99% rename from src/op/hybrid_op.cc rename to src/top/operation/hybrid_op.cc index f01f7458d9e7..d959826772f3 100644 --- a/src/op/hybrid_op.cc +++ b/src/top/operation/hybrid_op.cc @@ -21,7 +21,7 @@ * \brief Hybrid computation rule. * \file hybrid_op.cc */ -#include +#include #include #include #include diff --git a/src/op/hybrid_op.h b/src/top/operation/hybrid_op.h similarity index 93% rename from src/op/hybrid_op.h rename to src/top/operation/hybrid_op.h index d20d14fb6535..c4586cb880d3 100644 --- a/src/op/hybrid_op.h +++ b/src/top/operation/hybrid_op.h @@ -21,18 +21,19 @@ * \brief Helper utilities to implement hybrid_op. * \file hybrid_op.h */ -#ifndef TVM_OP_HYBRID_OP_H_ -#define TVM_OP_HYBRID_OP_H_ +#ifndef TVM_TOP_OPERATION_HYBRID_OP_H_ +#define TVM_TOP_OPERATION_HYBRID_OP_H_ #include -#include +#include + #include #include #include -#include "../pass/ir_util.h" -#include "../pass/arg_binder.h" -#include "../schedule/message_passing.h" +#include "../schedule/message_passing.h" +#include "../../pass/ir_util.h" +#include "../../pass/arg_binder.h" namespace tvm { namespace top { @@ -93,4 +94,4 @@ Stmt ApplyLoopOrder(const Stage &stage, } // namespace top } // namespace tvm -#endif // TVM_OP_HYBRID_OP_H_ +#endif // TVM_TOP_OPERATION_HYBRID_OP_H_ diff --git a/src/op/op_util.cc b/src/top/operation/op_util.cc similarity index 99% rename from src/op/op_util.cc rename to src/top/operation/op_util.cc index 12f14dc08dbb..fcf8318d21b8 100644 --- a/src/op/op_util.cc +++ b/src/top/operation/op_util.cc @@ -24,11 +24,11 @@ #include #include #include -#include +#include #include #include "op_util.h" #include "../schedule/message_passing.h" -#include "../arith/compute_expr.h" +#include "../../arith/compute_expr.h" namespace tvm { namespace top { diff --git a/src/op/op_util.h b/src/top/operation/op_util.h similarity index 94% rename from src/op/op_util.h rename to src/top/operation/op_util.h index 42df6da50c74..babdabc2c46a 100644 --- a/src/op/op_util.h +++ b/src/top/operation/op_util.h @@ -21,16 +21,16 @@ * \file op_util.h * \brief Common utility used in operator construction. 
*/ -#ifndef TVM_OP_OP_UTIL_H_ -#define TVM_OP_OP_UTIL_H_ +#ifndef TVM_TOP_OPERATION_OP_UTIL_H_ +#define TVM_TOP_OPERATION_OP_UTIL_H_ #include -#include +#include #include #include #include -#include "../pass/ir_util.h" -#include "../pass/arg_binder.h" +#include "../../pass/ir_util.h" +#include "../../pass/arg_binder.h" #include "../schedule/message_passing.h" namespace tvm { @@ -104,4 +104,4 @@ ir::ForType IterVarTypeToForType(IterVarType iter_type); } // namespace top } // namespace tvm -#endif // TVM_OP_OP_UTIL_H_ +#endif // TVM_TOP_OPERATION_OP_UTIL_H_ diff --git a/src/op/placeholder_op.cc b/src/top/operation/placeholder_op.cc similarity index 98% rename from src/op/placeholder_op.cc rename to src/top/operation/placeholder_op.cc index 9121cfc3cfbc..284752b3661c 100644 --- a/src/op/placeholder_op.cc +++ b/src/top/operation/placeholder_op.cc @@ -21,7 +21,7 @@ * \brief Placeholder op. * \file placeholder_op.cc */ -#include +#include namespace tvm { namespace top { diff --git a/src/op/scan_op.cc b/src/top/operation/scan_op.cc similarity index 99% rename from src/op/scan_op.cc rename to src/top/operation/scan_op.cc index ab16b827b7d3..8f54872bc7ac 100644 --- a/src/op/scan_op.cc +++ b/src/top/operation/scan_op.cc @@ -21,7 +21,7 @@ * \brief Scan Operator. * \file scan_op.cc */ -#include +#include #include #include #include "op_util.h" diff --git a/src/op/tensor_compute_op.cc b/src/top/operation/tensor_compute_op.cc similarity index 99% rename from src/op/tensor_compute_op.cc rename to src/top/operation/tensor_compute_op.cc index d924a1e9fe87..49b00fc5d533 100644 --- a/src/op/tensor_compute_op.cc +++ b/src/top/operation/tensor_compute_op.cc @@ -21,14 +21,14 @@ * \brief Tensor Compute Op. * \file tensor_compute_op.cc */ -#include +#include #include #include #include #include #include "./op_util.h" #include "./compute_op.h" -#include "../arith/compute_expr.h" +#include "../../arith/compute_expr.h" namespace tvm { namespace top { diff --git a/src/op/tensorize.cc b/src/top/operation/tensorize.cc similarity index 100% rename from src/op/tensorize.cc rename to src/top/operation/tensorize.cc diff --git a/src/schedule/auto_inline_elem_wise.cc b/src/top/schedule/auto_inline_elem_wise.cc similarity index 97% rename from src/schedule/auto_inline_elem_wise.cc rename to src/top/schedule/auto_inline_elem_wise.cc index 0429ff6966fc..9b088132ae1b 100644 --- a/src/schedule/auto_inline_elem_wise.cc +++ b/src/top/schedule/auto_inline_elem_wise.cc @@ -20,8 +20,8 @@ /*! * \file auto_inline_elem_wise.cc */ -#include -#include +#include +#include #include namespace tvm { diff --git a/src/schedule/bound.cc b/src/top/schedule/bound.cc similarity index 98% rename from src/schedule/bound.cc rename to src/top/schedule/bound.cc index 3cc4f92e2434..8fffc5398411 100644 --- a/src/schedule/bound.cc +++ b/src/top/schedule/bound.cc @@ -21,14 +21,14 @@ * \file bound.cc * \brief The bound inference logic. 
 */
-#include
-#include
+#include
+#include
 #include
 #include
 #include
 #include "graph.h"
 #include "message_passing.h"
-#include "../runtime/thread_storage_scope.h"
+#include "../../runtime/thread_storage_scope.h"
 namespace tvm {
 namespace top {
diff --git a/src/schedule/graph.cc b/src/top/schedule/graph.cc
similarity index 99%
rename from src/schedule/graph.cc
rename to src/top/schedule/graph.cc
index bc85f0e1eaa0..62df8422afdb 100644
--- a/src/schedule/graph.cc
+++ b/src/top/schedule/graph.cc
@@ -23,7 +23,7 @@
 */
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/src/schedule/graph.h b/src/top/schedule/graph.h
similarity index 95%
rename from src/schedule/graph.h
rename to src/top/schedule/graph.h
index f8dc344ef8e8..f379f98ee4eb 100644
--- a/src/schedule/graph.h
+++ b/src/top/schedule/graph.h
@@ -21,12 +21,12 @@
 * \file graph.h
 * \brief Utilities to get information about schedule graph.
 */
-#ifndef TVM_SCHEDULE_GRAPH_H_
-#define TVM_SCHEDULE_GRAPH_H_
+#ifndef TVM_TOP_SCHEDULE_GRAPH_H_
+#define TVM_TOP_SCHEDULE_GRAPH_H_
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -128,4 +128,4 @@ Map ScanFixPointAnalysis(const Operation& scan);
 } // namespace top
 } // namespace tvm
-#endif // TVM_SCHEDULE_GRAPH_H_
+#endif // TVM_TOP_SCHEDULE_GRAPH_H_
diff --git a/src/schedule/message_passing.cc b/src/top/schedule/message_passing.cc
similarity index 99%
rename from src/schedule/message_passing.cc
rename to src/top/schedule/message_passing.cc
index ce1a4ca624d7..47326816acb6 100644
--- a/src/schedule/message_passing.cc
+++ b/src/top/schedule/message_passing.cc
@@ -25,7 +25,7 @@
 #include
 #include
 #include "message_passing.h"
-#include "../arith/compute_expr.h"
+#include "../../arith/compute_expr.h"
 namespace tvm {
 namespace top {
diff --git a/src/schedule/message_passing.h b/src/top/schedule/message_passing.h
similarity index 95%
rename from src/schedule/message_passing.h
rename to src/top/schedule/message_passing.h
index a0fc175cfe88..42b72a729871 100644
--- a/src/schedule/message_passing.h
+++ b/src/top/schedule/message_passing.h
@@ -22,12 +22,12 @@
 * \brief Common utilities to do message passing
 * on the schedule hyper graph.
 */
-#ifndef TVM_SCHEDULE_MESSAGE_PASSING_H_
-#define TVM_SCHEDULE_MESSAGE_PASSING_H_
+#ifndef TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
+#define TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -130,4 +130,4 @@ MakeBoundCheck(
 } // namespace top
 } // namespace tvm
-#endif // TVM_SCHEDULE_MESSAGE_PASSING_H_
+#endif // TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
diff --git a/src/schedule/schedule_dataflow_rewrite.cc b/src/top/schedule/schedule_dataflow_rewrite.cc
similarity index 99%
rename from src/schedule/schedule_dataflow_rewrite.cc
rename to src/top/schedule/schedule_dataflow_rewrite.cc
index 1daeee831f0b..5f9ba3984309 100644
--- a/src/schedule/schedule_dataflow_rewrite.cc
+++ b/src/top/schedule/schedule_dataflow_rewrite.cc
@@ -20,14 +20,14 @@
 /*!
 * \file schedule_dataflow_rewrite.cc
 */
-#include
-#include
+#include
+#include
 #include
 #include
 #include
 #include "message_passing.h"
-#include "../pass/ir_util.h"
-#include "../arith/compute_expr.h"
+#include "../../pass/ir_util.h"
+#include "../../arith/compute_expr.h"
 namespace tvm {
 namespace top {
diff --git a/src/schedule/schedule_lang.cc b/src/top/schedule/schedule_lang.cc
similarity index 99%
rename from src/schedule/schedule_lang.cc
rename to src/top/schedule/schedule_lang.cc
index 4b8b25cc0a70..55235305d4c1 100644
--- a/src/schedule/schedule_lang.cc
+++ b/src/top/schedule/schedule_lang.cc
@@ -20,8 +20,8 @@
 /*!
 * \file schedule_lang.cc
 */
-#include
-#include
+#include
+#include
 #include
 #include "graph.h"
diff --git a/src/schedule/schedule_ops.cc b/src/top/schedule/schedule_ops.cc
similarity index 99%
rename from src/schedule/schedule_ops.cc
rename to src/top/schedule/schedule_ops.cc
index a3beeb548971..1176d82eaead 100644
--- a/src/schedule/schedule_ops.cc
+++ b/src/top/schedule/schedule_ops.cc
@@ -23,14 +23,14 @@
 #include
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
 #include "graph.h"
-#include "../op/op_util.h"
-#include "../pass/ir_util.h"
+#include "../operation/op_util.h"
+#include "../../pass/ir_util.h"
 namespace tvm {
 namespace top {
diff --git a/src/lang/tensor.cc b/src/top/tensor.cc
similarity index 97%
rename from src/lang/tensor.cc
rename to src/top/tensor.cc
index 9821ee2ed56d..c8e3aeaa4e4c 100644
--- a/src/lang/tensor.cc
+++ b/src/top/tensor.cc
@@ -20,9 +20,9 @@
 /*!
 * \file tensor.cc
 */
-#include
-#include
-#include
+#include
+#include
+#include
 #include
 namespace tvm {
diff --git a/tests/cpp/build_module_test.cc b/tests/cpp/build_module_test.cc
index 916f7ef037fe..30834c5af46c 100644
--- a/tests/cpp/build_module_test.cc
+++ b/tests/cpp/build_module_test.cc
@@ -20,7 +20,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/tests/cpp/expr_test.cc b/tests/cpp/expr_test.cc
index d9b620063f56..af8ede396edc 100644
--- a/tests/cpp/expr_test.cc
+++ b/tests/cpp/expr_test.cc
@@ -19,7 +19,7 @@
 #include
 #include
-#include
+#include
 TEST(Expr, Basic) {
 using namespace tvm;
diff --git a/tests/cpp/ir_simplify_test.cc b/tests/cpp/ir_simplify_test.cc
index 6b694eff28f3..e9f0df6493a6 100644
--- a/tests/cpp/ir_simplify_test.cc
+++ b/tests/cpp/ir_simplify_test.cc
@@ -20,7 +20,7 @@
 #include
 #include
 #include
-#include
+#include
 TEST(IRSIMPLIFY, MinMax) {
 auto x = tvm::var("x");
diff --git a/tests/cpp/relay_build_module_test.cc b/tests/cpp/relay_build_module_test.cc
index df2a6b6d9717..462d0fe12eba 100644
--- a/tests/cpp/relay_build_module_test.cc
+++ b/tests/cpp/relay_build_module_test.cc
@@ -19,7 +19,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/tests/cpp/relay_pass_type_infer_test.cc b/tests/cpp/relay_pass_type_infer_test.cc
index 7d03d2e0ef87..68d5d0d67f74 100644
--- a/tests/cpp/relay_pass_type_infer_test.cc
+++ b/tests/cpp/relay_pass_type_infer_test.cc
@@ -18,7 +18,7 @@
 */
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/tests/cpp/relay_transform_sequential.cc b/tests/cpp/relay_transform_sequential.cc
index 8321c580ab0f..4c383b5af4e1 100644
--- a/tests/cpp/relay_transform_sequential.cc
+++ b/tests/cpp/relay_transform_sequential.cc
@@ -28,7 +28,7 @@
 #include
 #include
 #include
-#include
+#include
 TVM_REGISTER_GLOBAL("schedule")
 .set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
diff --git a/tests/cpp/simple_passes_test.cc b/tests/cpp/simple_passes_test.cc
index aabb6c31e0db..e41b881faa43 100644
--- a/tests/cpp/simple_passes_test.cc
+++ b/tests/cpp/simple_passes_test.cc
@@ -20,7 +20,7 @@
 #include
 #include
 #include
-#include
+#include
 TEST(SimplePasses, HasSideEffect) {
 using namespace tvm;
diff --git a/tests/cpp/tensor_test.cc b/tests/cpp/tensor_test.cc
index f8973211231c..5d6dc239f5c3 100644
--- a/tests/cpp/tensor_test.cc
+++ b/tests/cpp/tensor_test.cc
@@ -19,7 +19,7 @@
 #include
 #include
-#include
+#include
 TEST(Tensor, Basic) {
 using namespace tvm;
diff --git a/tests/cpp/topi_ewise_test.cc b/tests/cpp/topi_ewise_test.cc
index 55a91452c189..5f89bdf9641a 100644
--- a/tests/cpp/topi_ewise_test.cc
+++ b/tests/cpp/topi_ewise_test.cc
@@ -17,7 +17,7 @@
 * under the License.
 */
-#include
+#include
 #include
 #include
diff --git a/tests/cpp/utvm_runtime_standalone_test.cc b/tests/cpp/utvm_runtime_standalone_test.cc
index a3720cd4d1a2..73a624584749 100644
--- a/tests/cpp/utvm_runtime_standalone_test.cc
+++ b/tests/cpp/utvm_runtime_standalone_test.cc
@@ -32,7 +32,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/topi/include/topi/contrib/cublas.h b/topi/include/topi/contrib/cublas.h
index 3766ee300132..44685fc165a2 100644
--- a/topi/include/topi/contrib/cublas.h
+++ b/topi/include/topi/contrib/cublas.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_CONTRIB_CUBLAS_H_
 #define TOPI_CONTRIB_CUBLAS_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "topi/detail/extern.h"
 namespace topi {
diff --git a/topi/include/topi/contrib/rocblas.h b/topi/include/topi/contrib/rocblas.h
index e061e44f8f90..062ad405c3d1 100644
--- a/topi/include/topi/contrib/rocblas.h
+++ b/topi/include/topi/contrib/rocblas.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_CONTRIB_ROCBLAS_H_
 #define TOPI_CONTRIB_ROCBLAS_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "topi/detail/extern.h"
 namespace topi {
diff --git a/topi/include/topi/cuda/dense.h b/topi/include/topi/cuda/dense.h
index 5a55c38d1127..637a861cd989 100644
--- a/topi/include/topi/cuda/dense.h
+++ b/topi/include/topi/cuda/dense.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_CUDA_DENSE_H_
 #define TOPI_CUDA_DENSE_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 #include "topi/detail/array_utils.h"
diff --git a/topi/include/topi/cuda/injective.h b/topi/include/topi/cuda/injective.h
index 1bcdf91e7047..5e42126729cc 100644
--- a/topi/include/topi/cuda/injective.h
+++ b/topi/include/topi/cuda/injective.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/cuda/normalization.h b/topi/include/topi/cuda/normalization.h
index f6d7c255b707..708f8d5e7bbf 100644
--- a/topi/include/topi/cuda/normalization.h
+++ b/topi/include/topi/cuda/normalization.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_CUDA_NORMALIZATION_H_
 #define TOPI_CUDA_NORMALIZATION_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
diff --git a/topi/include/topi/cuda/pooling.h b/topi/include/topi/cuda/pooling.h
index ec06371b4cd9..d2a5c1f4511f 100644
--- a/topi/include/topi/cuda/pooling.h
+++ b/topi/include/topi/cuda/pooling.h
@@ -27,7 +27,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
 #include "topi/detail/array_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/cuda/reduction.h b/topi/include/topi/cuda/reduction.h
index 86f0fe86fe54..244567499d3f 100644
--- a/topi/include/topi/cuda/reduction.h
+++ b/topi/include/topi/cuda/reduction.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/cuda/softmax.h b/topi/include/topi/cuda/softmax.h
index b2ba0bcbafd4..6f12de000bf8 100644
--- a/topi/include/topi/cuda/softmax.h
+++ b/topi/include/topi/cuda/softmax.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/detail/array_utils.h b/topi/include/topi/detail/array_utils.h
index 07cce9af693e..0c0feec2eaa7 100644
--- a/topi/include/topi/detail/array_utils.h
+++ b/topi/include/topi/detail/array_utils.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_DETAIL_ARRAY_UTILS_H_
 #define TOPI_DETAIL_ARRAY_UTILS_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace detail {
diff --git a/topi/include/topi/detail/broadcast.h b/topi/include/topi/detail/broadcast.h
index dc585439ce89..2e644eebdc8c 100644
--- a/topi/include/topi/detail/broadcast.h
+++ b/topi/include/topi/detail/broadcast.h
@@ -29,7 +29,7 @@
 #include
 #include "tvm/ir_pass.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 #include "topi/detail/constant_utils.h"
diff --git a/topi/include/topi/detail/extern.h b/topi/include/topi/detail/extern.h
index 532b704d6fac..717ce4d46d33 100644
--- a/topi/include/topi/detail/extern.h
+++ b/topi/include/topi/detail/extern.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_DETAIL_EXTERN_H_
 #define TOPI_DETAIL_EXTERN_H_
-#include
+#include
 #include
 #include
diff --git a/topi/include/topi/detail/fuse.h b/topi/include/topi/detail/fuse.h
index 91f40ad0ae7b..5a77db6b31f2 100644
--- a/topi/include/topi/detail/fuse.h
+++ b/topi/include/topi/detail/fuse.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_DETAIL_FUSE_H_
 #define TOPI_DETAIL_FUSE_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace detail {
diff --git a/topi/include/topi/detail/ravel_unravel.h b/topi/include/topi/detail/ravel_unravel.h
index 40ebf11e4e5c..c8da45d918b2 100644
--- a/topi/include/topi/detail/ravel_unravel.h
+++ b/topi/include/topi/detail/ravel_unravel.h
@@ -26,7 +26,7 @@
 #include
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/generic/default.h b/topi/include/topi/generic/default.h
index 931631ea7a74..540e4a17f148 100644
--- a/topi/include/topi/generic/default.h
+++ b/topi/include/topi/generic/default.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/generic/extern.h b/topi/include/topi/generic/extern.h
index 0d685c63d5b5..ea1e7b6531a6 100644
--- a/topi/include/topi/generic/extern.h
+++ b/topi/include/topi/generic/extern.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "injective.h"
diff --git a/topi/include/topi/generic/injective.h b/topi/include/topi/generic/injective.h
index 0913dda13c36..7df2e1539151 100644
--- a/topi/include/topi/generic/injective.h
+++ b/topi/include/topi/generic/injective.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/image/resize.h b/topi/include/topi/image/resize.h
index c0075e06d168..7c1bad3dc9a7 100644
--- a/topi/include/topi/image/resize.h
+++ b/topi/include/topi/image/resize.h
@@ -33,7 +33,7 @@
 #include "topi/elemwise.h"
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/nn.h b/topi/include/topi/nn.h
index 0c3a4ceaabde..b86c00c60ae3 100644
--- a/topi/include/topi/nn.h
+++ b/topi/include/topi/nn.h
@@ -31,7 +31,7 @@
 #include "topi/detail/constant_utils.h"
 #include "tvm/ir.h"
 #include "tvm/ir_pass.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/nn/batch_matmul.h b/topi/include/topi/nn/batch_matmul.h
index d31b831fcbbc..a3bd96df77d8 100644
--- a/topi/include/topi/nn/batch_matmul.h
+++ b/topi/include/topi/nn/batch_matmul.h
@@ -27,7 +27,7 @@
 #include
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
diff --git a/topi/include/topi/nn/bias_add.h b/topi/include/topi/nn/bias_add.h
index 1eb43a089044..2d6f47ca8b3e 100644
--- a/topi/include/topi/nn/bias_add.h
+++ b/topi/include/topi/nn/bias_add.h
@@ -29,7 +29,7 @@
 #include "topi/tags.h"
 #include "topi/broadcast.h"
 #include "topi/transform.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/nn/bnn.h b/topi/include/topi/nn/bnn.h
index 47f6d0c223b3..7c920347b68f 100644
--- a/topi/include/topi/nn/bnn.h
+++ b/topi/include/topi/nn/bnn.h
@@ -26,7 +26,7 @@
 #include
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/ir_pass.h"
 #include "topi/tags.h"
 #include "topi/detail/constant_utils.h"
diff --git a/topi/include/topi/nn/dense.h b/topi/include/topi/nn/dense.h
index 68c50f64e076..7cdc8d7b5be3 100644
--- a/topi/include/topi/nn/dense.h
+++ b/topi/include/topi/nn/dense.h
@@ -27,7 +27,7 @@
 #include
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
diff --git a/topi/include/topi/nn/dilate.h b/topi/include/topi/nn/dilate.h
index 8dc3d1516f6a..6ffb3da25e52 100644
--- a/topi/include/topi/nn/dilate.h
+++ b/topi/include/topi/nn/dilate.h
@@ -26,7 +26,7 @@
 #include
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/ir_pass.h"
 #include "topi/tags.h"
diff --git a/topi/include/topi/nn/flatten.h b/topi/include/topi/nn/flatten.h
index 510c304572ac..de11b6dd797e 100644
--- a/topi/include/topi/nn/flatten.h
+++ b/topi/include/topi/nn/flatten.h
@@ -29,7 +29,7 @@
 #include "topi/tags.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
diff --git a/topi/include/topi/nn/l2_normalize.h b/topi/include/topi/nn/l2_normalize.h
index 5ad32a6c65da..6670e6da3fc3 100644
--- a/topi/include/topi/nn/l2_normalize.h
+++ b/topi/include/topi/nn/l2_normalize.h
@@ -27,7 +27,7 @@
 #include
 #include
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
 using namespace tvm;
diff --git a/topi/include/topi/nn/local_response_norm.h b/topi/include/topi/nn/local_response_norm.h
index c9836e265dca..cd3b9b2456e1 100644
--- a/topi/include/topi/nn/local_response_norm.h
+++ b/topi/include/topi/nn/local_response_norm.h
@@ -27,7 +27,7 @@
 #include
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
diff --git a/topi/include/topi/nn/mapping.h b/topi/include/topi/nn/mapping.h
index 156696499819..4cd2fe1b7708 100644
--- a/topi/include/topi/nn/mapping.h
+++ b/topi/include/topi/nn/mapping.h
@@ -27,7 +27,7 @@
 #include
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
diff --git a/topi/include/topi/nn/softmax.h b/topi/include/topi/nn/softmax.h
index 0ae5c106c50b..72e17454b724 100644
--- a/topi/include/topi/nn/softmax.h
+++ b/topi/include/topi/nn/softmax.h
@@ -29,7 +29,7 @@
 #include "topi/reduction.h"
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/reduction.h b/topi/include/topi/reduction.h
index fa405f94b8bc..197ef2b152c4 100644
--- a/topi/include/topi/reduction.h
+++ b/topi/include/topi/reduction.h
@@ -35,7 +35,7 @@
 #include "topi/transform.h"
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
diff --git a/topi/include/topi/rocm/dense.h b/topi/include/topi/rocm/dense.h
index b3146db6c964..00297489cd28 100644
--- a/topi/include/topi/rocm/dense.h
+++ b/topi/include/topi/rocm/dense.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_ROCM_DENSE_H_
 #define TOPI_ROCM_DENSE_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 #include "topi/detail/array_utils.h"
diff --git a/topi/include/topi/rocm/injective.h b/topi/include/topi/rocm/injective.h
index b521ff0cabb6..b2999f4d8bb5 100644
--- a/topi/include/topi/rocm/injective.h
+++ b/topi/include/topi/rocm/injective.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/cuda/injective.h"
diff --git a/topi/include/topi/rocm/normalization.h b/topi/include/topi/rocm/normalization.h
index ed33b69fce1f..7dde9dc77140 100644
--- a/topi/include/topi/rocm/normalization.h
+++ b/topi/include/topi/rocm/normalization.h
@@ -24,7 +24,7 @@
 #ifndef TOPI_ROCM_NORMALIZATION_H_
 #define TOPI_ROCM_NORMALIZATION_H_
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
diff --git a/topi/include/topi/rocm/pooling.h b/topi/include/topi/rocm/pooling.h
index 98ae6d80902a..14f0aa0c66ce 100644
--- a/topi/include/topi/rocm/pooling.h
+++ b/topi/include/topi/rocm/pooling.h
@@ -27,7 +27,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
 #include "topi/detail/array_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/cuda/pooling.h"
diff --git a/topi/include/topi/rocm/reduction.h b/topi/include/topi/rocm/reduction.h
index 89be35340535..4b788ee603cd 100644
--- a/topi/include/topi/rocm/reduction.h
+++ b/topi/include/topi/rocm/reduction.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/cuda/reduction.h"
diff --git a/topi/include/topi/rocm/softmax.h b/topi/include/topi/rocm/softmax.h
index 4c6785b062ba..43f27318a4df 100644
--- a/topi/include/topi/rocm/softmax.h
+++ b/topi/include/topi/rocm/softmax.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/cuda/softmax.h"
diff --git a/topi/include/topi/transform.h b/topi/include/topi/transform.h
index 7df17a1595f9..41a64ebb45ad 100644
--- a/topi/include/topi/transform.h
+++ b/topi/include/topi/transform.h
@@ -35,7 +35,7 @@
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
 #include "topi/detail/tensor_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 #include "tvm/data_layout.h"
diff --git a/topi/include/topi/vision/reorg.h b/topi/include/topi/vision/reorg.h
index bf01704f36e8..c5ddea9fdeb0 100644
--- a/topi/include/topi/vision/reorg.h
+++ b/topi/include/topi/vision/reorg.h
@@ -31,7 +31,7 @@
 #include "topi/reduction.h"
 #include "topi/tags.h"
 #include "topi/transform.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 namespace topi {
diff --git a/topi/include/topi/x86/bnn.h b/topi/include/topi/x86/bnn.h
index 66c02e461fe9..578360e9a010 100644
--- a/topi/include/topi/x86/bnn.h
+++ b/topi/include/topi/x86/bnn.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/x86/default.h b/topi/include/topi/x86/default.h
index 921d1474569c..e3ac0110ea49 100644
--- a/topi/include/topi/x86/default.h
+++ b/topi/include/topi/x86/default.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {
diff --git a/topi/include/topi/x86/injective.h b/topi/include/topi/x86/injective.h
index c0a28222ceea..12c413e30f11 100644
--- a/topi/include/topi/x86/injective.h
+++ b/topi/include/topi/x86/injective.h
@@ -26,7 +26,7 @@
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 namespace topi {