Move relevant files into include/tvm/top and src/top
tqchen committed Jan 16, 2020
1 parent d159271 commit cc7772d
Showing 104 changed files with 195 additions and 170 deletions.
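For downstream C++ code, the visible effect of this commit is an include-path change; the tvm::top namespace itself predates the move. A minimal before/after sketch, not part of the commit, with a purely illustrative helper:

```cpp
// Hedged sketch: only the include path changes for users of the Tensor type.
// Before this commit:
//   #include <tvm/tensor.h>
// After this commit:
#include <tvm/top/tensor.h>

// Returns the number of dimensions of a tensor's shape (illustrative helper).
int NumDims(const tvm::top::Tensor& t) {
  return static_cast<int>(t->shape.size());
}
```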
14 changes: 9 additions & 5 deletions CMakeLists.txt
@@ -124,20 +124,24 @@ assign_source_group("Source" ${GROUP_SOURCE})
assign_source_group("Include" ${GROUP_INCLUDE})

# Source file lists
file(GLOB COMPILER_SRCS
file(GLOB_RECURSE COMPILER_SRCS
src/node/*.cc
src/ir/*.cc
src/target/*.cc
src/api/*.cc
src/arith/*.cc
src/top/*.cc
src/api/*.cc
src/autotvm/*.cc
src/codegen/*.cc
src/lang/*.cc
src/pass/*.cc
src/op/*.cc
src/schedule/*.cc
)

file(GLOB CODEGEN_SRCS
src/codegen/*.cc
)

list(APPEND COMPILER_SRCS ${CODEGEN_SRCS})

file(GLOB_RECURSE RELAY_OP_SRCS
src/relay/op/*.cc
)
5 changes: 4 additions & 1 deletion include/tvm/arith/bound.h
@@ -77,7 +77,10 @@ IntSet DeduceBound(PrimExpr v, PrimExpr cond,
* \param consider_provides If provides (write) are considered.
* \return The domain that covers all the calls or provides within the given statement.
*/
Domain DomainTouched(Stmt body, const top::Tensor &tensor, bool consider_calls, bool consider_provides);
Domain DomainTouched(Stmt body,
const top::Tensor &tensor,
bool consider_calls,
bool consider_provides);

} // namespace arith
} // namespace tvm
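As context for the reformatted declaration above, a hedged usage sketch of DomainTouched with the relocated top::Tensor type; `stmt` and `tensor` are assumed to come from elsewhere (e.g. a lowered schedule), and only the header paths reflect this patch:

```cpp
// Hedged sketch: collect the region of `tensor` touched by reads inside a
// statement, using the DomainTouched declaration shown above.
#include <tvm/expr.h>
#include <tvm/arith/bound.h>
#include <tvm/top/tensor.h>

tvm::Domain ReadRegion(const tvm::Stmt& stmt, const tvm::top::Tensor& tensor) {
  // Consider calls (reads) only; ignore provides (writes).
  return tvm::arith::DomainTouched(stmt, tensor,
                                   /*consider_calls=*/true,
                                   /*consider_provides=*/false);
}
```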
4 changes: 3 additions & 1 deletion include/tvm/build_module.h
@@ -26,14 +26,16 @@

#include <tvm/target/target.h>
#include <tvm/support/with.h>
#include <tvm/top/schedule_pass.h>

#include <string>
#include <vector>
#include <utility>
#include <unordered_map>
#include <unordered_set>

#include "runtime/packed_func.h"
#include "schedule_pass.h"

#include "lowered_func.h"

namespace tvm {
3 changes: 2 additions & 1 deletion include/tvm/ir_pass.h
@@ -27,13 +27,14 @@
#ifndef TVM_IR_PASS_H_
#define TVM_IR_PASS_H_

#include <tvm/top/schedule.h>

#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include "expr.h"
#include "buffer.h"
#include "schedule.h"
#include "lowered_func.h"

namespace tvm {
3 changes: 2 additions & 1 deletion include/tvm/lowered_func.h
@@ -25,10 +25,11 @@
#ifndef TVM_LOWERED_FUNC_H_
#define TVM_LOWERED_FUNC_H_

#include <tvm/top/tensor.h>

#include <string>

#include "expr.h"
#include "tensor.h"
#include "tvm/node/container.h"

namespace tvm {
3 changes: 2 additions & 1 deletion include/tvm/packed_func_ext.h
@@ -25,13 +25,14 @@
#ifndef TVM_PACKED_FUNC_EXT_H_
#define TVM_PACKED_FUNC_EXT_H_

#include <tvm/top/tensor.h>

#include <string>
#include <memory>
#include <limits>
#include <type_traits>

#include "expr.h"
#include "tensor.h"
#include "runtime/packed_func.h"

namespace tvm {
4 changes: 2 additions & 2 deletions include/tvm/relay/op_attr_types.h
@@ -24,8 +24,8 @@
#ifndef TVM_RELAY_OP_ATTR_TYPES_H_
#define TVM_RELAY_OP_ATTR_TYPES_H_

#include <tvm/tensor.h>
#include <tvm/schedule.h>
#include <tvm/top/tensor.h>
#include <tvm/top/schedule.h>
#include <tvm/build_module.h>
#include <tvm/relay/type.h>
#include <tvm/relay/expr.h>
23 changes: 13 additions & 10 deletions include/tvm/operation.h → include/tvm/top/operation.h
@@ -18,26 +18,29 @@
*/

/*!
* \file tvm/operation.h
* \file tvm/top/operation.h
* \brief Operation node can generate one or multiple Tensors
*/
#ifndef TVM_OPERATION_H_
#define TVM_OPERATION_H_
#ifndef TVM_TOP_OPERATION_H_
#define TVM_TOP_OPERATION_H_

#include <tvm/arith/analyzer.h>
#include <tvm/top/tensor.h>
#include <tvm/top/schedule.h>

#include <tvm/expr.h>
#include <tvm/expr_operator.h>
#include <tvm/buffer.h>

#include <string>
#include <vector>
#include <unordered_map>

#include "expr.h"
#include "expr_operator.h"
#include "tensor.h"
#include "schedule.h"
#include "buffer.h"


namespace tvm {
namespace top {

using arith::IntSet;

/*!
@@ -655,6 +658,6 @@ inline Tensor compute(Array<PrimExpr> shape,
inline const OperationNode* Operation::operator->() const {
return static_cast<const OperationNode*>(get());
}
} // namspace top
} // namespace top
} // namespace tvm
#endif // TVM_OPERATION_H_
#endif // TVM_TOP_OPERATION_H_
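The hunk above ends inside the inline compute overloads of the relocated operation.h. A hedged sketch of how they are typically used through the new <tvm/top/...> paths; the placeholder/compute helpers are assumed to keep their pre-move signatures, and the tensor names and element-wise body are illustrative:

```cpp
// Hedged sketch: declare an input placeholder and an element-wise compute.
#include <tvm/top/operation.h>

tvm::top::Tensor AddOne() {
  using namespace tvm;
  Array<PrimExpr> shape = {16, 16};
  top::Tensor A = top::placeholder(shape, DataType::Float(32), "A");
  // The lambda maps index variables to the value of each produced element.
  return top::compute(
      shape, [&](Var i, Var j) { return A(i, j) + 1.0f; }, "B");
}
```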
17 changes: 10 additions & 7 deletions include/tvm/schedule.h → include/tvm/top/schedule.h
@@ -18,18 +18,21 @@
*/

/*!
* \file tvm/schedule.h
* \file tvm/top/schedule.h
* \brief Define a schedule.
*/
// Acknowledgement: Many schedule primitives originate from Halide and Loopy.
#ifndef TVM_SCHEDULE_H_
#define TVM_SCHEDULE_H_
#ifndef TVM_TOP_SCHEDULE_H_
#define TVM_TOP_SCHEDULE_H_

#include <tvm/expr.h>
#include <tvm/top/tensor.h>
#include <tvm/top/tensor_intrin.h>


#include <string>
#include <unordered_map>
#include "expr.h"
#include "tensor.h"
#include "tensor_intrin.h"


namespace tvm {
namespace top {
@@ -766,4 +769,4 @@ inline const IterVarAttrNode* IterVarAttr::operator->() const {
}
} // namespace top
} // namespace tvm
#endif // TVM_SCHEDULE_H_
#endif // TVM_TOP_SCHEDULE_H_
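A companion sketch for the relocated schedule.h, assuming create_schedule and the Stage accessor keep their pre-move behavior; the tensor argument would be an output such as the one built in the compute sketch above:

```cpp
// Hedged sketch: build a schedule over an output operation.
#include <tvm/top/operation.h>
#include <tvm/top/schedule.h>

tvm::top::Schedule MakeSchedule(const tvm::top::Tensor& B) {
  using namespace tvm;
  // A schedule is created from the output operations it should cover.
  top::Schedule s = top::create_schedule({B->op});
  // Scheduling primitives (split, fuse, reorder, ...) are applied per stage.
  top::Stage stage = s[B->op];
  (void)stage;  // no primitives applied in this sketch
  return s;
}
```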
10 changes: 5 additions & 5 deletions include/tvm/schedule_pass.h → include/tvm/top/schedule_pass.h
@@ -18,17 +18,17 @@
*/

/*!
* \file tvm/schedule_pass.h
* \file tvm/top/schedule_pass.h
* \brief Collection of Schedule pass functions.
*
* These passes works on the schedule hyper-graph
* and infers information such as bounds, check conditions
* read/write dependencies between the IterVar
*/
#ifndef TVM_SCHEDULE_PASS_H_
#define TVM_SCHEDULE_PASS_H_
#ifndef TVM_TOP_SCHEDULE_PASS_H_
#define TVM_TOP_SCHEDULE_PASS_H_

#include "schedule.h"
#include <tvm/top/schedule.h>

namespace tvm {
namespace top {
@@ -73,4 +73,4 @@ TVM_DLL void AutoInlineInjective(Schedule sch);

} // namespace top
} // namespace tvm
#endif // TVM_SCHEDULE_PASS_H_
#endif // TVM_TOP_SCHEDULE_PASS_H_
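To illustrate the passes this header collects, a hedged sketch of the usual normalize / InferBound / ScheduleOps sequence, assuming these functions keep their pre-move signatures:

```cpp
// Hedged sketch: infer bounds over the schedule hyper-graph, then materialize
// the scheduled loop nest as a statement.
#include <tvm/top/schedule_pass.h>

tvm::Stmt LowerToStmt(tvm::top::Schedule s) {
  using namespace tvm;
  top::Schedule norm = s.normalize();
  Map<IterVar, Range> bounds = top::InferBound(norm);
  return top::ScheduleOps(norm, bounds, /*debug_keep_trivial_loop=*/false);
}
```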
13 changes: 7 additions & 6 deletions include/tvm/tensor.h → include/tvm/top/tensor.h
@@ -18,22 +18,23 @@
*/

/*!
* \file tvm/tensor.h
* \file tvm/top/tensor.h
* \brief Dataflow tensor object
*/
#ifndef TVM_TENSOR_H_
#define TVM_TENSOR_H_
#ifndef TVM_TOP_TENSOR_H_
#define TVM_TOP_TENSOR_H_

#include <tvm/node/container.h>
#include <tvm/arith/bound.h>
#include <tvm/expr.h>
#include <tvm/expr_operator.h>

#include <string>
#include <vector>
#include <utility>
#include <type_traits>

#include "expr.h"
#include "expr_operator.h"


namespace tvm {
namespace top {
@@ -267,4 +268,4 @@ struct hash<::tvm::top::Tensor> {
}
};
} // namespace std
#endif // TVM_TENSOR_H_
#endif // TVM_TOP_TENSOR_H_
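The tail of the hunk above is the std::hash specialization for top::Tensor; a small hedged sketch of what it enables (the string payload is illustrative):

```cpp
// Hedged sketch: the std::hash specialization makes top::Tensor usable as an
// unordered-container key.
#include <tvm/top/tensor.h>

#include <string>
#include <unordered_map>

using TensorTags = std::unordered_map<tvm::top::Tensor, std::string>;

void TagOutput(TensorTags* tags, const tvm::top::Tensor& t) {
  // Hashing comes from std::hash<tvm::top::Tensor>; equality from Tensor.
  (*tags)[t] = "output";
}
```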
14 changes: 8 additions & 6 deletions include/tvm/tensor_intrin.h → include/tvm/top/tensor_intrin.h
@@ -18,15 +18,17 @@
*/

/*!
* \file tvm/tensor_intrin.h
* \file tvm/top/tensor_intrin.h
* \brief Tensor intrinsic operations.
*/
#ifndef TVM_TENSOR_INTRIN_H_
#define TVM_TENSOR_INTRIN_H_
#ifndef TVM_TOP_TENSOR_INTRIN_H_
#define TVM_TOP_TENSOR_INTRIN_H_

#include <tvm/top/tensor.h>
#include <tvm/buffer.h>

#include <string>
#include "tensor.h"
#include "buffer.h"


namespace tvm {
namespace top {
@@ -176,4 +178,4 @@ inline const TensorIntrinCallNode* TensorIntrinCall::operator->() const {

} // namespace top
} // namespace tvm
#endif // TVM_TENSOR_INTRIN_H_
#endif // TVM_TOP_TENSOR_INTRIN_H_
9 changes: 4 additions & 5 deletions src/README.md
@@ -24,13 +24,12 @@ There can be internal header files within each module that sit in src.
- support: Internal support utilities.
- runtime: Minimum runtime related codes.
- node: base infra for IR/AST nodes that is dialect independent.
- api: API function registration.
- lang: The definition of DSL related data structure.
- arith: Arithmetic expression and set simplification.
- op: The detail implementations about each operation(compute, scan, placeholder).
- schedule: The operations on the schedule graph before converting to IR.
- top: tensor operation DSL for compute and schedule.
- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks.
- pass: The optimization pass on the IR structure.
- codegen: The code generator.
- autotvm: The auto-tuning module.
- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks.
- contrib: Contrib extension libraries.
- api: API function registration.
- lang: The definition of DSL related data structure.
2 changes: 1 addition & 1 deletion src/api/api_arith.cc
@@ -31,7 +31,7 @@
#include <tvm/runtime/registry.h>
#include <tvm/packed_func_ext.h>

#include <tvm/tensor.h>
#include <tvm/top/tensor.h>

namespace tvm {
namespace arith {
2 changes: 1 addition & 1 deletion src/api/api_base.cc
@@ -23,7 +23,7 @@
*/
#include <dmlc/memory_io.h>
#include <tvm/expr.h>
#include <tvm/tensor.h>
#include <tvm/top/tensor.h>
#include <tvm/runtime/registry.h>
#include <tvm/packed_func_ext.h>

6 changes: 3 additions & 3 deletions src/api/api_lang.cc
@@ -23,10 +23,10 @@
*/
#include <tvm/expr.h>
#include <tvm/ir.h>
#include <tvm/tensor.h>
#include <tvm/operation.h>
#include <tvm/top/tensor.h>
#include <tvm/top/operation.h>
#include <tvm/buffer.h>
#include <tvm/schedule.h>
#include <tvm/top/schedule.h>
#include <tvm/runtime/registry.h>
#include <tvm/packed_func_ext.h>

8 changes: 4 additions & 4 deletions src/api/api_schedule.cc
@@ -22,13 +22,13 @@
* \file api_schedule.cc
*/
#include <tvm/expr.h>
#include <tvm/tensor.h>
#include <tvm/schedule.h>
#include <tvm/schedule_pass.h>
#include <tvm/top/tensor.h>
#include <tvm/top/schedule.h>
#include <tvm/top/schedule_pass.h>
#include <tvm/runtime/registry.h>
#include <tvm/packed_func_ext.h>

#include "../schedule/graph.h"
#include "../top/schedule/graph.h"

namespace tvm {
namespace top {
2 changes: 1 addition & 1 deletion src/api/api_test.cc
@@ -22,7 +22,7 @@
* \file api_test.cc
*/
#include <tvm/expr.h>
#include <tvm/tensor.h>
#include <tvm/top/tensor.h>
#include <tvm/ir/attrs.h>
#include <tvm/runtime/registry.h>
#include <tvm/ir/env_func.h>
7 changes: 5 additions & 2 deletions src/arith/domain_touched.cc
@@ -24,7 +24,7 @@
#include <tvm/expr.h>
#include <tvm/ir_pass.h>
#include <tvm/ir_functor_ext.h>
#include <tvm/tensor.h>
#include <tvm/top/tensor.h>
#include <tvm/runtime/registry.h>
#include <tvm/packed_func_ext.h>

@@ -114,7 +114,10 @@ class FuncTouchedDomain final : public StmtExprVisitor {
std::unordered_map<const VarNode*, IntSet> dom_map_;
};

Domain DomainTouched(Stmt stmt, const top::Tensor &tensor, bool consider_calls, bool consider_provides) {
Domain DomainTouched(Stmt stmt,
const top::Tensor &tensor,
bool consider_calls,
bool consider_provides) {
return FuncTouchedDomain(tensor, consider_calls, consider_provides).Find(stmt);
}

2 changes: 1 addition & 1 deletion src/codegen/build_module.cc
@@ -23,7 +23,7 @@
*/
#include <dmlc/thread_local.h>
#include <tvm/build_module.h>
#include <tvm/operation.h>
#include <tvm/top/operation.h>
#include <tvm/ir_pass.h>
#include <tvm/codegen.h>
#include <tvm/runtime/registry.h>