Defined a common base class for TensorComputeOp and ComputeOp #2587
Changes from all commits:
```diff
@@ -184,22 +184,45 @@ class PlaceholderOpNode : public OperationNode {
 
 /*!
  * \brief A Compute op that compute a tensor on certain domain.
+ * This is the base class for ComputeOp (operating on a scalar at a time) and
+ * TensorComputeOp (operating on a TensorSlice at a time)
  */
-class TVM_DLL ComputeOpNode : public OperationNode {
+class TVM_DLL BaseComputeOpNode : public OperationNode {
  public:
   /*! \brief IterVar on each axis */
   Array<IterVar> axis;
   /*! \brief IterVar on each reduction axis, if the body is a Reduce */
   Array<IterVar> reduce_axis;
+  // override functions
+  Array<IterVar> root_iter_vars() const final;
+  Array<Expr> output_shape(size_t idx) const final;
+  void GatherBound(
+      const Operation& self,
+      const std::unordered_map<Tensor, TensorDom>& tensor_dom,
+      std::unordered_map<IterVar, Range>* out_dom_map) const final;
+  Stmt BuildRealize(
+      const Stage& stage,
+      const std::unordered_map<IterVar, Range>& realize_map,
+      const Stmt& body) const final;
+  virtual size_t num_schedulable_dims() const = 0;
+
+  static constexpr const char* _type_key = "BaseComputeOp";
+  TVM_DECLARE_BASE_NODE_INFO(BaseComputeOpNode, OperationNode);
+};
+
+
+/*!
+ * \brief A Compute op that compute a tensor on certain domain.
+ */
+class TVM_DLL ComputeOpNode : public BaseComputeOpNode {
+ public:
   /*! \brief the compute expression */
   Array<Expr> body;
   /*! \brief constructor */
   ComputeOpNode() {}
   // override functions
   int num_outputs() const final;
-  Array<IterVar> root_iter_vars() const final;
   Type output_dtype(size_t i) const final;
-  Array<Expr> output_shape(size_t i) const final;
   Array<Tensor> InputTensors() const final;
   Operation ReplaceInputs(
       const Operation& self,
```

Review thread (on the new `BaseComputeOpNode` declaration):

**Reviewer:** Since `ComputeOpNode` is an exposed structure, can we keep it as the original, but have another `BasicComputeOpNode` that `ComputeOpNode` and `TensorComputeOpNode` inherit from? In this way we can avoid so many modifications, and also save modifications for the in-house projects based on TVM.

**Author:** The way I see it is that TVM has defined a […] Regarding the amount of modifications needed both inside and outside TVM: […] That's my two cents.

**Reviewer:** Agree; what I'm suggesting has no conflict with this point. We do have some customized passes (C++ and Python) and customized schedule templates (Python) in which we reference `ComputeOpNode`, and that code is not public. If this is merged, we would need to replace those references with `ScalarComputeOpNode` when syncing with upstream. I believe other in-house projects may have a similar issue.

**Author:** I don't see much difference if we change `ComputeOpNode` to `ScalarComputeOpNode`, since everybody who uses […]

**Author:** Thanks @xqdan. I followed the naming scheme you recommended. I used […]
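The compatibility concern above is what the final naming resolves: the concrete class keeps its original name, so existing downstream passes that pattern-match on `ComputeOpNode` keep compiling, while new code can be written once against the shared base. A minimal sketch of the two styles (the helper names here are illustrative, not part of this PR):

```cpp
#include <tvm/operation.h>

using namespace tvm;

// Pre-existing downstream code keeps compiling: ComputeOpNode keeps
// its name and merely gains BaseComputeOpNode as a parent class.
int CountScalarBodies(const Operation& op) {
  if (const ComputeOpNode* compute = op.as<ComputeOpNode>()) {
    return static_cast<int>(compute->body.size());
  }
  return 0;
}

// New-style code can handle ComputeOp and TensorComputeOp uniformly
// through the members that moved into the common base.
size_t CountRootIterVars(const BaseComputeOpNode* op) {
  return op->axis.size() + op->reduce_axis.size();
}
```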
```diff
@@ -208,18 +231,11 @@ class TVM_DLL ComputeOpNode : public OperationNode {
       const Operation& self,
       const std::unordered_map<const Variable*, IntSet>& dom_map,
       std::unordered_map<Tensor, TensorDom>* out_dom_map) const final;
-  void GatherBound(
-      const Operation& self,
-      const std::unordered_map<Tensor, TensorDom>& tensor_dom,
-      std::unordered_map<IterVar, Range>* out_dom_map) const final;
-  Stmt BuildRealize(
-      const Stage& stage,
-      const std::unordered_map<IterVar, Range>& realize_map,
-      const Stmt& body) const final;
   Stmt BuildProvide(
       const Stage& stage,
       const std::unordered_map<IterVar, Range>& dom_map,
       bool debug_keep_trivial_loop) const final;
+  size_t num_schedulable_dims() const final;
 
   void VisitAttrs(AttrVisitor* v) final {
     v->Visit("name", &name);
```
```diff
@@ -236,18 +252,14 @@ class TVM_DLL ComputeOpNode : public OperationNode {
                         Array<Expr> body);
 
   static constexpr const char* _type_key = "ComputeOp";
-  TVM_DECLARE_NODE_TYPE_INFO(ComputeOpNode, OperationNode);
+  TVM_DECLARE_NODE_TYPE_INFO(ComputeOpNode, BaseComputeOpNode);
 };
 
 /*!
  * \brief A TenorCompute op that compute a tensor with an tensor intrinsic.
  */
-class TensorComputeOpNode : public OperationNode {
+class TensorComputeOpNode : public BaseComputeOpNode {
  public:
-  /*! \brief IterVar on each axis */
-  Array<IterVar> axis;
-  /*! \brief IterVar on each reduction axis, if the intrin will use the reduce axis */
-  Array<IterVar> reduce_axis;
   /*! \brief number of axes that can be scheduled */
   int schedulable_ndim;
   /*! \brief TensorIntrin used to compute */
```
```diff
@@ -260,9 +272,7 @@ class TensorComputeOpNode : public OperationNode {
   TensorComputeOpNode() {}
   // override functions
   int num_outputs() const final;
-  Array<IterVar> root_iter_vars() const final;
   Type output_dtype(size_t i) const final;
-  Array<Expr> output_shape(size_t i) const final;
   Array<Tensor> InputTensors() const final;
   Operation ReplaceInputs(
       const Operation& self,
@@ -271,18 +281,11 @@ class TensorComputeOpNode : public OperationNode {
       const Operation& self,
       const std::unordered_map<const Variable*, IntSet>& dom_map,
       std::unordered_map<Tensor, TensorDom>* out_dom_map) const final;
-  void GatherBound(
-      const Operation& self,
-      const std::unordered_map<Tensor, TensorDom>& tensor_dom,
-      std::unordered_map<IterVar, Range>* out_dom_map) const final;
-  Stmt BuildRealize(
-      const Stage& stage,
-      const std::unordered_map<IterVar, Range>& realize_map,
-      const Stmt& body) const final;
   Stmt BuildProvide(
       const Stage& stage,
       const std::unordered_map<IterVar, Range>& dom_map,
       bool debug_keep_trivial_loop) const final;
+  size_t num_schedulable_dims() const final;
 
   void VisitAttrs(AttrVisitor* v) final {
     v->Visit("name", &name);
```
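Here `TensorComputeOpNode` overrides the same hook, which is what a shared implementation (such as the `GatherBound`/`BuildRealize` now living on the base) can use to ask how many leading axes a schedule may touch, without a type switch. An assumed usage sketch (`SchedulableAxes` is hypothetical, not from the PR):

```cpp
#include <tvm/operation.h>

using namespace tvm;

// Hypothetical helper: the leading num_schedulable_dims() entries of
// axis are the dimensions a schedule may legally split/fuse/reorder.
Array<IterVar> SchedulableAxes(const BaseComputeOpNode* op) {
  Array<IterVar> result;
  for (size_t i = 0; i < op->num_schedulable_dims(); ++i) {
    result.push_back(op->axis[i]);
  }
  return result;
}
```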
```diff
@@ -304,7 +307,7 @@ class TensorComputeOpNode : public OperationNode {
                                   Array<Region> regions);
 
   static constexpr const char* _type_key = "TensorComputeOp";
-  TVM_DECLARE_NODE_TYPE_INFO(TensorComputeOpNode, OperationNode);
+  TVM_DECLARE_NODE_TYPE_INFO(TensorComputeOpNode, BaseComputeOpNode);
 };
 
 /*!
```
Review thread (on the `BaseComputeOpNode` doc comment):

**Reviewer:** Suggest stressing that it is the base class of `ComputeOp` and `TensorComputeOp`.

**Author:** Done.