diff --git a/include/tvm/arith/analyzer.h b/include/tvm/arith/analyzer.h index a9a0bed6712a..cd20bdcf4d1a 100644 --- a/include/tvm/arith/analyzer.h +++ b/include/tvm/arith/analyzer.h @@ -320,10 +320,10 @@ class CanonicalSimplifier { * arith::Analyzer analyzer; * { * With<ConstraintContext> scope(&analyzer, x % 3 == 0); - * CHECK_EQ(analyzer.modular_set(x)->coeff, 3); + * ICHECK_EQ(analyzer.modular_set(x)->coeff, 3); * } * // constraint no longer in effect. - * CHECK_NE(analyzer.modular_set(x)->coeff, 3); + * ICHECK_NE(analyzer.modular_set(x)->coeff, 3); * * \endcode */ diff --git a/include/tvm/ir/attrs.h b/include/tvm/ir/attrs.h index e92baf12b05f..afb8ef0730e0 100644 --- a/include/tvm/ir/attrs.h +++ b/include/tvm/ir/attrs.h @@ -428,7 +428,7 @@ inline void SetValue(double* ptr, const TVMArgValue& val) { *ptr = val.operator double(); } else { ObjectRef expr = val; - CHECK(expr.defined()); + ICHECK(expr.defined()); if (const IntImmNode* op = expr.as<IntImmNode>()) { *ptr = static_cast<double>(op->value); } else if (const FloatImmNode* op = expr.as<FloatImmNode>()) { @@ -664,7 +664,7 @@ class AttrsNode : public BaseAttrsNode { } void InitByPackedArgs(const runtime::TVMArgs& args, bool allow_unknown) final { - CHECK_EQ(args.size() % 2, 0); + ICHECK_EQ(args.size() % 2, 0); const int kLinearSearchBound = 16; int hit_count = 0; // applies two stratgies to lookup @@ -672,7 +672,7 @@ class AttrsNode : public BaseAttrsNode { // linear search. auto ffind = [&args](const char* key, runtime::TVMArgValue* val) { for (int i = 0; i < args.size(); i += 2) { - CHECK_EQ(args.type_codes[i], kTVMStr); + ICHECK_EQ(args.type_codes[i], kTVMStr); if (!std::strcmp(key, args.values[i].v_str)) { *val = args[i + 1]; return true; @@ -687,7 +687,7 @@ class AttrsNode : public BaseAttrsNode { // construct a map then do lookup. std::unordered_map<std::string, runtime::TVMArgValue> kwargs; for (int i = 0; i < args.size(); i += 2) { - CHECK_EQ(args.type_codes[i], kTVMStr); + ICHECK_EQ(args.type_codes[i], kTVMStr); kwargs[args[i].operator std::string()] = args[i + 1]; } auto ffind = [&kwargs](const char* key, runtime::TVMArgValue* val) { diff --git a/include/tvm/ir/diagnostic.h b/include/tvm/ir/diagnostic.h index 2a2a6cd4e867..2053a295a3b8 100644 --- a/include/tvm/ir/diagnostic.h +++ b/include/tvm/ir/diagnostic.h @@ -149,7 +149,7 @@ class DiagnosticRenderer : public ObjectRef { void Render(const DiagnosticContext& ctx); DiagnosticRendererNode* operator->() { - CHECK(get() != nullptr); + ICHECK(get() != nullptr); return static_cast<DiagnosticRendererNode*>(get_mutable()); } @@ -203,7 +203,7 @@ class DiagnosticContext : public ObjectRef { void Render(); DiagnosticContextNode* operator->() { - CHECK(get() != nullptr); + ICHECK(get() != nullptr); return static_cast<DiagnosticContextNode*>(get_mutable()); } diff --git a/include/tvm/ir/env_func.h b/include/tvm/ir/env_func.h index 65653b75562d..386666a2c50c 100644 --- a/include/tvm/ir/env_func.h +++ b/include/tvm/ir/env_func.h @@ -83,7 +83,7 @@ class EnvFunc : public ObjectRef { template <typename... Args> runtime::TVMRetValue operator()(Args&&... args) const { const EnvFuncNode* n = operator->(); - CHECK(n != nullptr); + ICHECK(n != nullptr); return n->func(std::forward<Args>(args)...); } /*! @@ -137,7 +137,7 @@ class TypedEnvFunc<R(Args...)> : public ObjectRef { */ R operator()(Args... args) const { const EnvFuncNode* n = operator->(); - CHECK(n != nullptr); + ICHECK(n != nullptr); return runtime::detail::typed_packed_call_dispatcher<R>::run(n->func, std::forward<Args>(args)...); }
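The hunks above only swap the macro name; the call-site pattern is unchanged. A minimal sketch of what a check looks like after this change (assuming <tvm/support/logging.h>, the header updated later in this diff, is on the include path):

    #include <tvm/support/logging.h>

    void Example(int n) {
      // Same streaming interface as the dmlc CHECK it replaces, but the failure
      // message is prefixed with kTVM_INTERNAL_ERROR_MESSAGE, marking it as a
      // TVM-internal invariant violation rather than a user-input error.
      ICHECK_GE(n, 0) << "n must be non-negative, got " << n;
    }
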
diff --git a/include/tvm/ir/expr.h b/include/tvm/ir/expr.h index d6cfc5a64121..c982c5cf850b 100644 --- a/include/tvm/ir/expr.h +++ b/include/tvm/ir/expr.h @@ -386,7 +386,7 @@ class Integer : public IntImm { * \brief convert to int64_t */ operator int64_t() const { - CHECK(data_ != nullptr) << " Trying to reference a null Integer"; + ICHECK(data_ != nullptr) << " Trying to reference a null Integer"; return (*this)->value; } // comparators @@ -461,9 +461,9 @@ class Range : public ObjectRef { // implementataions inline const Type& RelayExprNode::checked_type() const { - CHECK(checked_type_.defined()) << "internal error: the type checker has " - << "not populated the checked_type " - << "field for " << GetRef<RelayExpr>(this); + ICHECK(checked_type_.defined()) << "internal error: the type checker has " + << "not populated the checked_type " + << "field for " << GetRef<RelayExpr>(this); return this->checked_type_; } @@ -471,11 +471,11 @@ template <typename TTypeNode> inline const TTypeNode* RelayExprNode::type_as() const { static_assert(std::is_base_of<Type, TTypeNode>::value, "TType must be a special case of type"); - CHECK(checked_type_.defined()) + ICHECK(checked_type_.defined()) << "Type inference for this Expr has not completed. Try to call infer_type pass."; const TTypeNode* node = checked_type_.as<TTypeNode>(); - CHECK(node != nullptr) << "Expected type to be " << TTypeNode::_type_key << ", but get " - << checked_type_->GetTypeKey(); + ICHECK(node != nullptr) << "Expected type to be " << TTypeNode::_type_key << ", but get " + << checked_type_->GetTypeKey(); return node; } @@ -522,7 +522,7 @@ struct PackedFuncValueConverter<tvm::Bool> { } if (val.type_code() == kTVMArgInt) { int v = val.operator int(); - CHECK(v == 0 || v == 1) << "ValueError: boolean value can only be 0 or 1, but get " << v; + ICHECK(v == 0 || v == 1) << "ValueError: boolean value can only be 0 or 1, but get " << v; return Bool(static_cast<bool>(v)); } return val.AsObjectRef<tvm::Bool>(); diff --git a/include/tvm/ir/module.h b/include/tvm/ir/module.h index b3f8438f6ec9..d6fb6a20b58a 100644 --- a/include/tvm/ir/module.h +++ b/include/tvm/ir/module.h @@ -300,7 +300,7 @@ class IRModule : public ObjectRef { /*! \return mutable pointers to the node. */ IRModuleNode* operator->() const { auto* ptr = get_mutable(); - CHECK(ptr != nullptr); + ICHECK(ptr != nullptr); return static_cast<IRModuleNode*>(ptr); }
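The checked_type()/type_as<T>() hunks above guard the accessors that are only valid after type inference; a usage sketch (TensorTypeNode and the include paths are assumptions for illustration, not part of this diff):

    #include <tvm/ir/expr.h>
    #include <tvm/ir/tensor_type.h>

    void InspectTensorType(const tvm::RelayExpr& e) {
      // ICHECK-fails if type inference has not populated checked_type_,
      // or if the checked type is not the requested node kind.
      const tvm::TensorTypeNode* tt = e->type_as<tvm::TensorTypeNode>();
      (void)tt;
    }
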
diff --git a/include/tvm/ir/op.h b/include/tvm/ir/op.h index e7b35778d500..c73be3c1e564 100644 --- a/include/tvm/ir/op.h +++ b/include/tvm/ir/op.h @@ -146,7 +146,7 @@ class OpNode : public RelayExprNode { // Internal function to compute if it is primitive op bool IsPrimitiveOp_() const { const auto& fn_ty = this->op_type; - CHECK(fn_ty.get() != nullptr); + ICHECK(fn_ty.get() != nullptr); if (fn_ty->type_constraints.size() != 1) return false; const TypeRelationNode* rel = fn_ty->type_constraints[0].as<TypeRelationNode>(); if (rel == nullptr) return false; @@ -462,7 +462,7 @@ inline OpRegEntry& OpRegEntry::set_support_level(int32_t n) { // NOLINT(*) template <typename ValueType> inline OpRegEntry& OpRegEntry::set_attr( // NOLINT(*) const std::string& attr_name, const ValueType& value, int plevel) { - CHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; + ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; runtime::TVMRetValue rv; rv = value; UpdateAttr(attr_name, rv, plevel); @@ -473,7 +473,7 @@ inline OpRegEntry& OpRegEntry::set_attr( // NOLINT(*) template <typename ValueType> inline ValueType OpAttrMap<ValueType>::get(const RelayExpr& expr, ValueType def_value) const { - CHECK(expr.defined()); + ICHECK(expr.defined()); if (const OpNode* op = expr.as<OpNode>()) { return this->map_.get(GetRef<Op>(op), def_value); } else { diff --git a/include/tvm/ir/transform.h b/include/tvm/ir/transform.h index 2bbf28311b30..d2931123073b 100644 --- a/include/tvm/ir/transform.h +++ b/include/tvm/ir/transform.h @@ -166,7 +166,7 @@ class PassContext : public ObjectRef { * \return const access pointer. */ const PassContextNode* operator->() const { - CHECK(get() != nullptr); + ICHECK(get() != nullptr); return static_cast<const PassContextNode*>(get()); } /*! @@ -174,7 +174,7 @@ class PassContext : public ObjectRef { * \return mutable access pointer. */ PassContextNode* operator->() { - CHECK(get() != nullptr); + ICHECK(get() != nullptr); return static_cast<PassContextNode*>(get_mutable()); } @@ -344,7 +344,7 @@ class Pass : public ObjectRef { */ IRModule operator()(IRModule mod) const { const PassNode* node = operator->(); - CHECK(node != nullptr); + ICHECK(node != nullptr); return node->operator()(std::move(mod)); } /*! @@ -357,7 +357,7 @@ class Pass : public ObjectRef { */ IRModule operator()(IRModule mod, const PassContext& pass_ctx) const { const PassNode* node = operator->(); - CHECK(node != nullptr); + ICHECK(node != nullptr); return node->operator()(std::move(mod), pass_ctx); } diff --git a/include/tvm/ir/type_functor.h b/include/tvm/ir/type_functor.h index 2a6314cf7644..11bf7d4740d0 100644 --- a/include/tvm/ir/type_functor.h +++ b/include/tvm/ir/type_functor.h @@ -71,7 +71,7 @@ class TypeFunctor { * \return The result of the call */ virtual R VisitType(const Type& n, Args... args) { - CHECK(n.defined()); + ICHECK(n.defined()); static FType vtable = InitVTable(); return vtable(n, this, std::forward<Args>(args)...); } diff --git a/include/tvm/node/attr_registry_map.h b/include/tvm/node/attr_registry_map.h index 9c554af9bc21..552aa7114657 100644 --- a/include/tvm/node/attr_registry_map.h +++ b/include/tvm/node/attr_registry_map.h @@ -56,9 +56,9 @@ class AttrRegistryMapContainerMap { * \return the const reference to the content value. */ const runtime::TVMRetValue& operator[](const KeyType& key) const { - CHECK(key.defined()); + ICHECK(key.defined()); const uint32_t idx = key->AttrRegistryIndex(); - CHECK(idx < data_.size() && data_[idx].second != 0) + ICHECK(idx < data_.size() && data_[idx].second != 0) << "Attribute " << attr_name_ << " has not been registered for " << key->name; return data_[idx].first; } @@ -71,7 +71,7 @@ class AttrRegistryMapContainerMap { */ template <typename ValueType> ValueType get(const KeyType& key, ValueType def_value) const { - CHECK(key.defined()); + ICHECK(key.defined()); const uint32_t idx = key->AttrRegistryIndex(); if (idx < data_.size() && data_[idx].second != 0) { return data_[idx].first;
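The operator[] and get() hunks above differ only in how a missing key is handled; a sketch of that contract (the attribute name "TMyAttr" is hypothetical, used only for illustration):

    #include <tvm/ir/op.h>

    tvm::Integer LookupWithDefault(const tvm::Op& op) {
      static auto amap = tvm::Op::GetAttrMap<tvm::Integer>("TMyAttr");  // hypothetical attr
      // get() returns the fallback for unregistered keys;
      // amap[op] would instead ICHECK-fail with "has not been registered for".
      return amap.get(op, tvm::Integer(0));
    }
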
diff --git a/include/tvm/node/container.h b/include/tvm/node/container.h index 74dabc168924..209bb9e72f33 100644 --- a/include/tvm/node/container.h +++ b/include/tvm/node/container.h @@ -351,7 +351,7 @@ class SmallMapNode : public MapNode, */ const mapped_type& at(const key_type& key) const { iterator itr = find(key); - CHECK(itr.index < size_) << "IndexError: key is not in Map"; + ICHECK(itr.index < size_) << "IndexError: key is not in Map"; return itr->second; } /*! @@ -361,7 +361,7 @@ class SmallMapNode : public MapNode, */ mapped_type& at(const key_type& key) { iterator itr = find(key); - CHECK(itr.index < size_) << "IndexError: key is not in Map"; + ICHECK(itr.index < size_) << "IndexError: key is not in Map"; return itr->second; } /*! \return begin iterator */ @@ -466,7 +466,7 @@ class SmallMapNode : public MapNode, } uint64_t next_size = std::max(map_node->slots_ * 2, uint64_t(kInitSize)); next_size = std::min(next_size, uint64_t(kMaxSize)); - CHECK_GT(next_size, map_node->slots_); + ICHECK_GT(next_size, map_node->slots_); ObjectPtr<Object> new_map = CreateFromRange(next_size, map_node->begin(), map_node->end()); InsertMaybeReHash(kv, &new_map); *map = std::move(new_map); @@ -656,7 +656,7 @@ class DenseMapNode : public MapNode { */ mapped_type& At(const key_type& key) const { ListNode iter = Search(key); - CHECK(!iter.IsNone()) << "IndexError: key is not in Map"; + ICHECK(!iter.IsNone()) << "IndexError: key is not in Map"; return iter.Val(); } /*! @@ -823,7 +823,7 @@ class DenseMapNode : public MapNode { * \return The object created */ static ObjectPtr<DenseMapNode> Empty(uint32_t fib_shift, uint64_t n_slots) { - CHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize)); + ICHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize)); ObjectPtr<DenseMapNode> p = make_object<DenseMapNode>(); uint64_t n_blocks = CalcNumBlocks(n_slots - 1); Block* block = p->data_ = new Block[n_blocks]; @@ -855,7 +855,7 @@ class DenseMapNode : public MapNode { for (int j = 0; j < kBlockCap; ++j, ++meta_ptr_from, ++data_ptr_from, ++meta_ptr_to, ++data_ptr_to) { uint8_t& meta = *meta_ptr_to = *meta_ptr_from; - CHECK(meta != kProtectedSlot); + ICHECK(meta != kProtectedSlot); if (meta != uint8_t(kEmptySlot)) { new (data_ptr_to) KVType(*data_ptr_from); } @@ -876,7 +876,7 @@ class DenseMapNode : public MapNode { iter.Val() = kv.second; return; } - CHECK_GT(map_node->slots_, uint64_t(SmallMapNode::kMaxSize)); + ICHECK_GT(map_node->slots_, uint64_t(SmallMapNode::kMaxSize)); // Otherwise, start rehash ObjectPtr<Object> p = Empty(map_node->fib_shift_ - 1, map_node->slots_ * 2 + 2); // Insert the given `kv` into the new hash map @@ -963,7 +963,7 @@ class DenseMapNode : public MapNode { shift -= 1; slots <<= 1; } - CHECK_GT(slots, cap); + ICHECK_GT(slots, cap); if (slots < cap * 2) { *fib_shift = shift - 1; *n_slots = slots << 1;
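Both map implementations above (SmallMapNode and DenseMapNode) report a failed lookup the same way; a small sketch of the user-facing behavior, assuming the container headers as of this revision:

    #include <tvm/node/container.h>
    #include <tvm/runtime/container.h>

    void MapAtDemo() {
      tvm::Map<tvm::runtime::String, tvm::runtime::String> m;
      m.Set("key", "value");
      tvm::runtime::String v = m.at("key");  // found, returns the value
      // m.at("missing");  // would ICHECK-fail: "IndexError: key is not in Map"
      (void)v;
    }
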
diff --git a/include/tvm/node/functor.h b/include/tvm/node/functor.h index 0837f35bd715..9920500ffe98 100644 --- a/include/tvm/node/functor.h +++ b/include/tvm/node/functor.h @@ -92,8 +92,8 @@ class NodeFunctor { * \return The result. */ R operator()(const ObjectRef& n, Args... args) const { - CHECK(can_dispatch(n)) << "NodeFunctor calls un-registered function on type " - << n->GetTypeKey(); + ICHECK(can_dispatch(n)) << "NodeFunctor calls un-registered function on type " + << n->GetTypeKey(); return (*func_[n->type_index()])(n, std::forward<Args>(args)...); } /*! @@ -108,7 +108,7 @@ class NodeFunctor { if (func_.size() <= tindex) { func_.resize(tindex + 1, nullptr); } - CHECK(func_[tindex] == nullptr) << "Dispatch for " << TNode::_type_key << " is already set"; + ICHECK(func_[tindex] == nullptr) << "Dispatch for " << TNode::_type_key << " is already set"; func_[tindex] = f; return *this; } @@ -121,7 +121,7 @@ class NodeFunctor { template <typename TNode> TSelf& clear_dispatch() { // NOLINT(*) uint32_t tindex = TNode::RuntimeTypeIndex(); - CHECK_LT(tindex, func_.size()) << "clear_dispatch: index out of range"; + ICHECK_LT(tindex, func_.size()) << "clear_dispatch: index out of range"; func_[tindex] = nullptr; return *this; } diff --git a/include/tvm/node/reflection.h b/include/tvm/node/reflection.h index e8ff26be42b3..d842c33cce03 100644 --- a/include/tvm/node/reflection.h +++ b/include/tvm/node/reflection.h @@ -208,7 +208,7 @@ class ReflectionVTable::Registry { * \return rference to self. */ Registry& set_creator(FCreate f) { // NOLINT(*) - CHECK_LT(type_index_, parent_->fcreate_.size()); + ICHECK_LT(type_index_, parent_->fcreate_.size()); parent_->fcreate_[type_index_] = f; return *this; } /*! @@ -218,7 +218,7 @@ class ReflectionVTable::Registry { * \return rference to self. 
*/ Registry& set_repr_bytes(FReprBytes f) { // NOLINT(*) - CHECK_LT(type_index_, parent_->frepr_bytes_.size()); + ICHECK_LT(type_index_, parent_->frepr_bytes_.size()); parent_->frepr_bytes_[type_index_] = f; return *this; } diff --git a/include/tvm/parser/source_map.h b/include/tvm/parser/source_map.h index 1153deb95dc3..424af5c98cc8 100644 --- a/include/tvm/parser/source_map.h +++ b/include/tvm/parser/source_map.h @@ -108,7 +108,7 @@ class SourceMap : public ObjectRef { void Add(const Source& source); SourceMapNode* operator->() { - CHECK(get() != nullptr); + ICHECK(get() != nullptr); return static_cast(get_mutable()); } diff --git a/include/tvm/relay/base.h b/include/tvm/relay/base.h index 76a6a221d065..e94bd2756e98 100644 --- a/include/tvm/relay/base.h +++ b/include/tvm/relay/base.h @@ -42,18 +42,18 @@ namespace tvm { */ namespace relay { -#define RELAY_DEBUG(...) \ - { \ - auto fdebug = runtime::Registry::Get("relay.debug"); \ - CHECK(fdebug) << "Could not find Relay Python debugger function."; \ - (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__); \ +#define RELAY_DEBUG(...) \ + { \ + auto fdebug = runtime::Registry::Get("relay.debug"); \ + ICHECK(fdebug) << "Could not find Relay Python debugger function."; \ + (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__); \ } -#define RELAY_DEBUG_INTERP(...) \ - { \ - auto fdebug = runtime::Registry::Get("relay.debug_interp"); \ - CHECK(fdebug) << "Could not find Relay Python debugger function."; \ - (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__); \ +#define RELAY_DEBUG_INTERP(...) \ + { \ + auto fdebug = runtime::Registry::Get("relay.debug_interp"); \ + ICHECK(fdebug) << "Could not find Relay Python debugger function."; \ + (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__); \ } /*! diff --git a/include/tvm/relay/dataflow_pattern_functor.h b/include/tvm/relay/dataflow_pattern_functor.h index 98c81c929409..364daac81cc8 100644 --- a/include/tvm/relay/dataflow_pattern_functor.h +++ b/include/tvm/relay/dataflow_pattern_functor.h @@ -76,7 +76,7 @@ class DFPatternFunctor { * \return The result of the call */ virtual R VisitDFPattern(const DFPattern& n, Args... args) { - CHECK(n.defined()); + ICHECK(n.defined()); static FType vtable = InitVTable(); return vtable(n, this, std::forward(args)...); } diff --git a/include/tvm/relay/expr_functor.h b/include/tvm/relay/expr_functor.h index c3d2f724b736..df0940fa7482 100644 --- a/include/tvm/relay/expr_functor.h +++ b/include/tvm/relay/expr_functor.h @@ -87,7 +87,7 @@ class ExprFunctor { * \return The result of the call */ virtual R VisitExpr(const Expr& n, Args... args) { - CHECK(n.defined()); + ICHECK(n.defined()); static FType vtable = InitVTable(); return vtable(n, this, std::forward(args)...); } @@ -345,7 +345,7 @@ class ExprRewriter { * \return The result of the call */ virtual Expr Rewrite(const Expr& pre, const Expr& post) { - CHECK(pre.defined()); + ICHECK(pre.defined()); static FType vtable = InitVTable(); return vtable(pre, this, post); } diff --git a/include/tvm/relay/pattern_functor.h b/include/tvm/relay/pattern_functor.h index de3bafa49074..711d8323f158 100644 --- a/include/tvm/relay/pattern_functor.h +++ b/include/tvm/relay/pattern_functor.h @@ -89,7 +89,7 @@ class PatternFunctor { * \return The result of the call */ virtual R VisitPattern(const Pattern& n, Args... 
args) { - CHECK(n.defined()); + ICHECK(n.defined()); static FType vtable = InitVTable(); return vtable(n, this, std::forward(args)...); } diff --git a/include/tvm/runtime/container.h b/include/tvm/runtime/container.h index 7778c5d8424c..796ab7b113c1 100644 --- a/include/tvm/runtime/container.h +++ b/include/tvm/runtime/container.h @@ -146,7 +146,7 @@ class InplaceArrayBase { */ const ElemType& operator[](size_t idx) const { size_t size = Self()->GetSize(); - CHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; + ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; return *(reinterpret_cast(AddressOf(idx))); } @@ -157,7 +157,7 @@ class InplaceArrayBase { */ ElemType& operator[](size_t idx) { size_t size = Self()->GetSize(); - CHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; + ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; return *(reinterpret_cast(AddressOf(idx))); } @@ -361,7 +361,7 @@ class ArrayNode : public Object, public InplaceArrayBase { */ static ObjectPtr CopyFrom(int64_t cap, ArrayNode* from) { int64_t size = from->size_; - CHECK_GE(cap, size) << "ValueError: not enough capacity"; + ICHECK_GE(cap, size) << "ValueError: not enough capacity"; ObjectPtr p = ArrayNode::Empty(cap); ObjectRef* write = p->MutableBegin(); ObjectRef* read = from->MutableBegin(); @@ -380,7 +380,7 @@ class ArrayNode : public Object, public InplaceArrayBase { */ static ObjectPtr MoveFrom(int64_t cap, ArrayNode* from) { int64_t size = from->size_; - CHECK_GE(cap, size) << "ValueError: not enough capacity"; + ICHECK_GE(cap, size) << "ValueError: not enough capacity"; ObjectPtr p = ArrayNode::Empty(cap); ObjectRef* write = p->MutableBegin(); ObjectRef* read = from->MutableBegin(); @@ -429,7 +429,7 @@ class ArrayNode : public Object, public InplaceArrayBase { * \return Ref-counted ArrayNode requested */ static ObjectPtr Empty(int64_t n = kInitSize) { - CHECK_GE(n, 0); + ICHECK_GE(n, 0); ObjectPtr p = make_inplace_array_object(n); p->capacity_ = n; p->size_ = 0; @@ -679,9 +679,9 @@ class Array : public ObjectRef { */ const T operator[](int64_t i) const { ArrayNode* p = GetArrayNode(); - CHECK(p != nullptr) << "ValueError: cannot index a null array"; - CHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size " - << p->size_; + ICHECK(p != nullptr) << "ValueError: cannot index a null array"; + ICHECK(0 <= i && i < p->size_) + << "IndexError: indexing " << i << " on an array of size " << p->size_; return DowncastNoCheck(*(p->begin() + i)); } @@ -703,16 +703,16 @@ class Array : public ObjectRef { /*! \return The first element of the array */ const T front() const { ArrayNode* p = GetArrayNode(); - CHECK(p != nullptr) << "ValueError: cannot index a null array"; - CHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; + ICHECK(p != nullptr) << "ValueError: cannot index a null array"; + ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; return DowncastNoCheck(*(p->begin())); } /*! 
\return The last element of the array */ const T back() const { ArrayNode* p = GetArrayNode(); - CHECK(p != nullptr) << "ValueError: cannot index a null array"; - CHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; + ICHECK(p != nullptr) << "ValueError: cannot index a null array"; + ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; return DowncastNoCheck(*(p->end() - 1)); } @@ -734,7 +734,7 @@ class Array : public ObjectRef { * \param val The element to insert */ void insert(iterator position, const T& val) { - CHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; + ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; int64_t idx = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; auto addr = CopyOnWrite(1) // @@ -755,7 +755,7 @@ class Array : public ObjectRef { if (first == last) { return; } - CHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; + ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; int64_t idx = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; int64_t numel = std::distance(first, last); @@ -767,9 +767,9 @@ class Array : public ObjectRef { /*! \brief Remove the last item of the list */ void pop_back() { - CHECK(data_ != nullptr) << "ValueError: cannot pop_back because array is null"; + ICHECK(data_ != nullptr) << "ValueError: cannot pop_back because array is null"; int64_t size = GetArrayNode()->size_; - CHECK_GT(size, 0) << "ValueError: cannot pop_back because array is empty"; + ICHECK_GT(size, 0) << "ValueError: cannot pop_back because array is empty"; CopyOnWrite()->ShrinkBy(1); } @@ -778,11 +778,11 @@ class Array : public ObjectRef { * \param position An iterator pointing to the element to be erased */ void erase(iterator position) { - CHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; + ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; int64_t st = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; - CHECK(0 <= st && st < size) << "ValueError: cannot erase at index " << st - << ", because Array size is " << size; + ICHECK(0 <= st && st < size) << "ValueError: cannot erase at index " << st + << ", because Array size is " << size; CopyOnWrite() // ->MoveElementsLeft(st, st + 1, size) // ->ShrinkBy(1); @@ -797,12 +797,12 @@ class Array : public ObjectRef { if (first == last) { return; } - CHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; + ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; int64_t size = GetArrayNode()->size_; int64_t st = std::distance(begin(), first); int64_t ed = std::distance(begin(), last); - CHECK_LT(st, ed) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")"; - CHECK(0 <= st && st <= size && 0 <= ed && ed <= size) + ICHECK_LT(st, ed) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")"; + ICHECK(0 <= st && st <= size && 0 <= ed && ed <= size) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")" << ", because array size is " << size; CopyOnWrite() // @@ -815,7 +815,7 @@ class Array : public ObjectRef { * \param n The new size. 
*/ void resize(int64_t n) { - CHECK_GE(n, 0) << "ValueError: cannot resize an Array to negative size"; + ICHECK_GE(n, 0) << "ValueError: cannot resize an Array to negative size"; if (data_ == nullptr) { SwitchContainer(n); return; @@ -856,8 +856,8 @@ class Array : public ObjectRef { */ void Set(int64_t i, T value) { ArrayNode* p = this->CopyOnWrite(); - CHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size " - << p->size_; + ICHECK(0 <= i && i < p->size_) + << "IndexError: indexing " << i << " on an array of size " << p->size_; *(p->MutableBegin() + i) = std::move(value); } @@ -923,7 +923,7 @@ class Array : public ObjectRef { template void Assign(IterType first, IterType last) { int64_t cap = std::distance(first, last); - CHECK_GE(cap, 0) << "ValueError: cannot construct an Array of negative size"; + ICHECK_GE(cap, 0) << "ValueError: cannot construct an Array of negative size"; ArrayNode* p = GetArrayNode(); if (p != nullptr && data_.unique() && p->capacity_ >= cap) { // do not have to make new space @@ -1565,8 +1565,8 @@ struct NullOptType {}; * * Optional opt0 = nullptr; * Optional opt1 = String("xyz"); - * CHECK(opt0 == nullptr); - * CHECK(opt1 == "xyz"); + * ICHECK(opt0 == nullptr); + * ICHECK(opt1 == "xyz"); * * \endcode */ @@ -1613,7 +1613,7 @@ class Optional : public ObjectRef { * \note This function performs not-null checking. */ T value() const { - CHECK(data_ != nullptr); + ICHECK(data_ != nullptr); return T(data_); } /*! diff --git a/include/tvm/runtime/data_type.h b/include/tvm/runtime/data_type.h index cb817a89ab81..25aadb598b28 100644 --- a/include/tvm/runtime/data_type.h +++ b/include/tvm/runtime/data_type.h @@ -24,8 +24,8 @@ #ifndef TVM_RUNTIME_DATA_TYPE_H_ #define TVM_RUNTIME_DATA_TYPE_H_ -#include #include +#include #include #include @@ -74,7 +74,7 @@ class DataType { data_.bits = static_cast(bits); data_.lanes = static_cast(lanes); if (code == kBFloat) { - CHECK_EQ(bits, 16); + ICHECK_EQ(bits, 16); } } /*! \return The type code. 
*/ @@ -212,7 +212,7 @@ inline int GetVectorBytes(DataType dtype) { dtype == DataType::Int(1)) { return 1; } - CHECK_EQ(data_bits % 8, 0U) << "Need to load/store by multiple of bytes"; + ICHECK_EQ(data_bits % 8, 0U) << "Need to load/store by multiple of bytes"; return data_bits / 8; } @@ -373,7 +373,7 @@ inline DLDataType String2DLDataType(std::string s) { if (*xdelim == 'x') { t.lanes = static_cast(strtoul(xdelim + 1, &endpt, 10)); } - CHECK(endpt == s.c_str() + s.length()) << "unknown type " << s; + ICHECK(endpt == s.c_str() + s.length()) << "unknown type " << s; return t; } diff --git a/include/tvm/runtime/ndarray.h b/include/tvm/runtime/ndarray.h index 92b3857fbec8..0ff171d4821f 100644 --- a/include/tvm/runtime/ndarray.h +++ b/include/tvm/runtime/ndarray.h @@ -325,29 +325,29 @@ inline bool NDArray::IsContiguous() const { } inline void NDArray::CopyFrom(const DLTensor* other) { - CHECK(data_ != nullptr); + ICHECK(data_ != nullptr); CopyFromTo(other, &(get_mutable()->dl_tensor)); } inline void NDArray::CopyFrom(const NDArray& other) { - CHECK(data_ != nullptr); - CHECK(other.data_ != nullptr); + ICHECK(data_ != nullptr); + ICHECK(other.data_ != nullptr); CopyFromTo(&(other.get_mutable()->dl_tensor), &(get_mutable()->dl_tensor)); } inline void NDArray::CopyTo(DLTensor* other) const { - CHECK(data_ != nullptr); + ICHECK(data_ != nullptr); CopyFromTo(&(get_mutable()->dl_tensor), other); } inline void NDArray::CopyTo(const NDArray& other) const { - CHECK(data_ != nullptr); - CHECK(other.data_ != nullptr); + ICHECK(data_ != nullptr); + ICHECK(other.data_ != nullptr); CopyFromTo(&(get_mutable()->dl_tensor), &(other.get_mutable()->dl_tensor)); } inline NDArray NDArray::CopyTo(const DLContext& ctx) const { - CHECK(data_ != nullptr); + ICHECK(data_ != nullptr); const DLTensor* dptr = operator->(); NDArray ret = Empty(std::vector(dptr->shape, dptr->shape + dptr->ndim), dptr->dtype, ctx); @@ -422,7 +422,7 @@ inline bool SaveDLTensor(dmlc::Stream* strm, const DLTensor* tensor) { strm->Write(tensor->data, data_byte_size); } else { std::vector bytes(data_byte_size); - CHECK_EQ( + ICHECK_EQ( TVMArrayCopyToBytes(const_cast(tensor), dmlc::BeginPtr(bytes), data_byte_size), 0) << TVMGetLastError(); @@ -438,19 +438,19 @@ inline void NDArray::Save(dmlc::Stream* strm) const { SaveDLTensor(strm, operato inline bool NDArray::Load(dmlc::Stream* strm) { uint64_t header, reserved; - CHECK(strm->Read(&header)) << "Invalid DLTensor file format"; - CHECK(strm->Read(&reserved)) << "Invalid DLTensor file format"; - CHECK(header == kTVMNDArrayMagic) << "Invalid DLTensor file format"; + ICHECK(strm->Read(&header)) << "Invalid DLTensor file format"; + ICHECK(strm->Read(&reserved)) << "Invalid DLTensor file format"; + ICHECK(header == kTVMNDArrayMagic) << "Invalid DLTensor file format"; DLContext ctx; int ndim; DLDataType dtype; - CHECK(strm->Read(&ctx)) << "Invalid DLTensor file format"; - CHECK(strm->Read(&ndim)) << "Invalid DLTensor file format"; - CHECK(strm->Read(&dtype)) << "Invalid DLTensor file format"; - CHECK_EQ(ctx.device_type, kDLCPU) << "Invalid DLTensor context: can only save as CPU tensor"; + ICHECK(strm->Read(&ctx)) << "Invalid DLTensor file format"; + ICHECK(strm->Read(&ndim)) << "Invalid DLTensor file format"; + ICHECK(strm->Read(&dtype)) << "Invalid DLTensor file format"; + ICHECK_EQ(ctx.device_type, kDLCPU) << "Invalid DLTensor context: can only save as CPU tensor"; std::vector shape(ndim); if (ndim != 0) { - CHECK(strm->ReadArray(&shape[0], ndim)) << "Invalid DLTensor file format"; + 
ICHECK(strm->ReadArray(&shape[0], ndim)) << "Invalid DLTensor file format"; } NDArray ret = NDArray::Empty(shape, dtype, ctx); int64_t num_elems = 1; @@ -459,12 +459,12 @@ inline bool NDArray::Load(dmlc::Stream* strm) { num_elems *= ret->shape[i]; } int64_t data_byte_size; - CHECK(strm->Read(&data_byte_size)) << "Invalid DLTensor file format"; - CHECK(data_byte_size == num_elems * elem_bytes) << "Invalid DLTensor file format"; + ICHECK(strm->Read(&data_byte_size)) << "Invalid DLTensor file format"; + ICHECK(data_byte_size == num_elems * elem_bytes) << "Invalid DLTensor file format"; auto read_ret = strm->Read(ret->data, data_byte_size); // Only check non-empty data if (ndim > 0 && shape[0] != 0) { - CHECK(read_ret) << "Invalid DLTensor file format"; + ICHECK(read_ret) << "Invalid DLTensor file format"; } if (!DMLC_IO_NO_ENDIAN_SWAP) { dmlc::ByteSwap(ret->data, elem_bytes, num_elems); diff --git a/include/tvm/runtime/packed_func.h b/include/tvm/runtime/packed_func.h index 2305f12e5533..43038998639e 100644 --- a/include/tvm/runtime/packed_func.h +++ b/include/tvm/runtime/packed_func.h @@ -195,7 +195,7 @@ class TypedPackedFunc { * // construct from packed function * TypedPackedFunc ftyped(packed); * // call the typed version. - * CHECK_EQ(ftyped(1), 2); + * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param packed The packed function @@ -225,7 +225,7 @@ class TypedPackedFunc { * // construct from packed function * TypedPackedFunc ftyped(typed_lambda); * // call the typed version. - * CHECK_EQ(ftyped(1), 2); + * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param typed_lambda typed lambda function. @@ -246,7 +246,7 @@ class TypedPackedFunc { * TypedPackedFunc ftyped; * ftyped = [](int x) { return x + 1; } * // call the typed version. - * CHECK_EQ(ftyped(1), 2); + * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param typed_lambda typed lambda function. @@ -337,7 +337,7 @@ inline const char* ArgTypeCode2Str(int type_code); // macro to check type code. #define TVM_CHECK_TYPE_CODE(CODE, T) \ - CHECK_EQ(CODE, T) << " expected " << ArgTypeCode2Str(T) << " but get " << ArgTypeCode2Str(CODE) + ICHECK_EQ(CODE, T) << " expected " << ArgTypeCode2Str(T) << " but get " << ArgTypeCode2Str(CODE) /*! * \brief Type traits for runtime type check during FFI conversion. @@ -382,8 +382,8 @@ class TVMPODValue_ { } operator int() const { TVM_CHECK_TYPE_CODE(type_code_, kDLInt); - CHECK_LE(value_.v_int64, std::numeric_limits::max()); - CHECK_GE(value_.v_int64, std::numeric_limits::min()); + ICHECK_LE(value_.v_int64, std::numeric_limits::max()); + ICHECK_GE(value_.v_int64, std::numeric_limits::min()); return static_cast(value_.v_int64); } operator bool() const { @@ -491,7 +491,7 @@ class TVMArgValue : public TVMPODValue_ { } else if (type_code_ == kTVMStr) { return std::string(value_.v_str); } else { - CHECK(IsObjectRef()); + ICHECK(IsObjectRef()); return AsObjectRef().operator std::string(); } } @@ -719,7 +719,7 @@ class TVMRetValue : public TVMPODValue_ { */ void MoveToCHost(TVMValue* ret_value, int* ret_type_code) { // cannot move str; need specially handle. - CHECK(type_code_ != kTVMStr && type_code_ != kTVMBytes); + ICHECK(type_code_ != kTVMStr && type_code_ != kTVMBytes); *ret_value = value_; *ret_type_code = type_code_; type_code_ = kTVMNullptr; @@ -733,7 +733,7 @@ class TVMRetValue : public TVMPODValue_ { */ static TVMRetValue MoveFromCHost(TVMValue value, int type_code) { // Can move POD and everything under the object system. 
- CHECK(type_code <= kTVMPackedFuncHandle || type_code == kTVMNDArrayHandle); + ICHECK(type_code <= kTVMPackedFuncHandle || type_code == kTVMNDArrayHandle); TVMRetValue ret; ret.value_ = value; ret.type_code_ = type_code; @@ -741,8 +741,8 @@ class TVMRetValue : public TVMPODValue_ { } /*! \return The value field, if the data is POD */ const TVMValue& value() const { - CHECK(type_code_ != kTVMObjectHandle && type_code_ != kTVMPackedFuncHandle && - type_code_ != kTVMModuleHandle && type_code_ != kTVMStr) + ICHECK(type_code_ != kTVMObjectHandle && type_code_ != kTVMPackedFuncHandle && + type_code_ != kTVMModuleHandle && type_code_ != kTVMStr) << "TVMRetValue.value can only be used for POD data"; return value_; } @@ -966,8 +966,8 @@ struct PackedFuncValueConverter { } inline TVMArgValue TVMArgs::operator[](int i) const { - CHECK_LT(i, num_args) << "not enough argument passed, " << num_args << " passed" - << " but request arg[" << i << "]."; + ICHECK_LT(i, num_args) << "not enough argument passed, " << num_args << " passed" + << " but request arg[" << i << "]."; return TVMArgValue(values[i], type_codes[i]); } @@ -1090,7 +1090,7 @@ class TVMArgsSetter { } TVM_ALWAYS_INLINE void operator()(size_t i, uint64_t value) const { values_[i].v_int64 = static_cast<int64_t>(value); - CHECK_LE(value, static_cast<uint64_t>(std::numeric_limits<int64_t>::max())); + ICHECK_LE(value, static_cast<uint64_t>(std::numeric_limits<int64_t>::max())); type_codes_[i] = kDLInt; } TVM_ALWAYS_INLINE void operator()(size_t i, double value) const { @@ -1155,7 +1155,7 @@ class TVMArgsSetter { values_[i].v_str = value.ptr()->c_str(); type_codes_[i] = kTVMStr; } else { - CHECK_NE(value.type_code(), kTVMBytes) << "not handled."; + ICHECK_NE(value.type_code(), kTVMBytes) << "not handled."; values_[i] = value.value_; type_codes_[i] = value.type_code(); } @@ -1234,7 +1234,7 @@ struct unpack_call_dispatcher { template <typename R, int nargs, typename F> TVM_ALWAYS_INLINE void unpack_call(const F& f, const TVMArgs& args, TVMRetValue* rv) { - CHECK_EQ(nargs, args.size()) << "Expect " << nargs << " arguments but get " << args.size(); + ICHECK_EQ(nargs, args.size()) << "Expect " << nargs << " arguments but get " << args.size(); unpack_call_dispatcher<R, nargs, 0, F>::run(f, args, rv); } @@ -1363,7 +1363,7 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const { using ContainerType = typename TObjectRef::ContainerType; if (type_code_ == kTVMNullptr) { - CHECK(TObjectRef::_type_is_nullable) + ICHECK(TObjectRef::_type_is_nullable) << "Expect a not null value of " << ContainerType::_type_key; return TObjectRef(ObjectPtr<Object>(nullptr)); } @@ -1373,7 +1373,7 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const { TVM_CHECK_TYPE_CODE(type_code_, kTVMNDArrayHandle); ObjectPtr<Object> data = NDArray::FFIDataFromHandle(static_cast<TVMArrayHandle>(value_.v_handle)); - CHECK(data->IsInstance<ContainerType>()) + ICHECK(data->IsInstance<ContainerType>()) << "Expect " << ContainerType::_type_key << " but get " << data->GetTypeKey(); return TObjectRef(data); } @@ -1381,20 +1381,20 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const { // Casting to a sub-class of Module TVM_CHECK_TYPE_CODE(type_code_, kTVMModuleHandle); ObjectPtr<Object> data = GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle)); - CHECK(data->IsInstance<ContainerType>()) + ICHECK(data->IsInstance<ContainerType>()) << "Expect " << ContainerType::_type_key << " but get " << data->GetTypeKey(); return TObjectRef(data); } if (type_code_ == kTVMObjectHandle) { // normal object type check. Object* ptr = static_cast<Object*>(value_.v_handle); - CHECK(ObjectTypeChecker<TObjectRef>::Check(ptr)) + ICHECK(ObjectTypeChecker<TObjectRef>::Check(ptr)) << "Expect " << ObjectTypeChecker<TObjectRef>::TypeName() << " but get " << ptr->GetTypeKey(); return TObjectRef(GetObjectPtr<Object>(ptr)); } else if (type_code_ == kTVMObjectRValueRefArg) { Object* ptr = *static_cast<Object**>(value_.v_handle); - CHECK(ObjectTypeChecker<TObjectRef>::Check(ptr)) + ICHECK(ObjectTypeChecker<TObjectRef>::Check(ptr)) << "Expect " << ObjectTypeChecker<TObjectRef>::TypeName() << " but get " << ptr->GetTypeKey(); return TObjectRef(GetObjectPtr<Object>(ptr));
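The packed_func.h doc comments updated above already demonstrate ICHECK_EQ with TypedPackedFunc; a self-contained version of that example (a sketch, assuming the usual headers):

    #include <tvm/runtime/packed_func.h>
    #include <tvm/support/logging.h>

    void TypedPackedFuncDemo() {
      using tvm::runtime::PackedFunc;
      using tvm::runtime::TypedPackedFunc;
      TypedPackedFunc<int(int)> ftyped([](int x) { return x + 1; });
      ICHECK_EQ(ftyped(1), 2);                 // typed call, as in the doc comments
      PackedFunc packed = ftyped;              // erase back to an untyped PackedFunc
      ICHECK_EQ(packed(1).operator int(), 2);  // untyped call, explicit conversion
    }
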
diff --git a/include/tvm/runtime/vm/bytecode.h b/include/tvm/runtime/vm/bytecode.h index edcbd881e074..e858c4458054 100644 --- a/include/tvm/runtime/vm/bytecode.h +++ b/include/tvm/runtime/vm/bytecode.h @@ -25,6 +25,7 @@ #define TVM_RUNTIME_VM_BYTECODE_H_ #include +#include <tvm/support/logging.h> #include #include diff --git a/include/tvm/support/logging.h b/include/tvm/support/logging.h index 4322435c06b0..d98363ea1c1b 100644 --- a/include/tvm/support/logging.h +++ b/include/tvm/support/logging.h @@ -125,13 +125,13 @@ constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE = #define ICHECK_BINARY_OP(name, op, x, y) \ if (dmlc::LogCheckError _check_err = dmlc::LogCheck##name(x, y)) \ dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \ - << kTVM_INTERNAL_ERROR_MESSAGE << std::endl \ + << tvm::kTVM_INTERNAL_ERROR_MESSAGE << std::endl \ << ICHECK_INDENT << "Check failed: " << #x " " #op " " #y << *(_check_err.str) << ": " #define ICHECK(x) \ if (!(x)) \ dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \ - << kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT << "Check failed: " #x << " == false: " + << tvm::kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT << "Check failed: " #x << " == false: " #define ICHECK_LT(x, y) ICHECK_BINARY_OP(_LT, <, x, y) #define ICHECK_GT(x, y) ICHECK_BINARY_OP(_GT, >, x, y) @@ -139,10 +139,10 @@ constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE = #define ICHECK_GE(x, y) ICHECK_BINARY_OP(_GE, >=, x, y) #define ICHECK_EQ(x, y) ICHECK_BINARY_OP(_EQ, ==, x, y) #define ICHECK_NE(x, y) ICHECK_BINARY_OP(_NE, !=, x, y) -#define ICHECK_NOTNULL(x) \ ((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \ << kTVM_INTERNAL_ERROR_MESSAGE << __INDENT << "Check not null: " #x \ << ' ', \ +#define ICHECK_NOTNULL(x) \ ((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \ << tvm::kTVM_INTERNAL_ERROR_MESSAGE << __INDENT << "Check not null: " #x \ << ' ', \ (x) : (x)) // NOLINT(*) /*! \brief The diagnostic level, controls the printing of the message. 
*/ diff --git a/include/tvm/target/target_kind.h b/include/tvm/target/target_kind.h index dd14602fa6fc..c9ef736f7aee 100644 --- a/include/tvm/target/target_kind.h +++ b/include/tvm/target/target_kind.h @@ -295,7 +295,7 @@ inline TargetKindAttrMap TargetKind::GetAttrMap(const String& attr_na template inline TargetKindRegEntry& TargetKindRegEntry::set_attr(const String& attr_name, const ValueType& value, int plevel) { - CHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; + ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; runtime::TVMRetValue rv; rv = value; UpdateAttr(attr_name, rv, plevel); @@ -321,7 +321,7 @@ inline TargetKindRegEntry& TargetKindRegEntry::set_attrs_preprocessor(FLambda f) template inline TargetKindRegEntry& TargetKindRegEntry::add_attr_option(const String& key) { - CHECK(!kind_->key2vtype_.count(key)) + ICHECK(!kind_->key2vtype_.count(key)) << "AttributeError: add_attr_option failed because '" << key << "' has been set once"; kind_->key2vtype_[key] = detail::ValueTypeInfoMaker()(); return *this; diff --git a/include/tvm/tir/data_layout.h b/include/tvm/tir/data_layout.h index ee93a0675470..73da05c549e2 100644 --- a/include/tvm/tir/data_layout.h +++ b/include/tvm/tir/data_layout.h @@ -255,9 +255,9 @@ class Layout : public ObjectRef { } const LayoutAxis& operator[](int32_t i) const { - CHECK(defined()) << "Try to access axis from an undefined layout."; + ICHECK(defined()) << "Try to access axis from an undefined layout."; int32_t index = i < 0 ? static_cast(ndim() + i) : i; - CHECK(index >= 0 && static_cast(index) < ndim()) << "Invalid index " << i; + ICHECK(index >= 0 && static_cast(index) < ndim()) << "Invalid index " << i; const tir::IterVar axis = operator->()->axes[index]; return LayoutAxis::Get(axis); } diff --git a/include/tvm/tir/expr_functor.h b/include/tvm/tir/expr_functor.h index 3f73d21bb625..b5f1d64a00c4 100644 --- a/include/tvm/tir/expr_functor.h +++ b/include/tvm/tir/expr_functor.h @@ -58,7 +58,7 @@ namespace tir { * }; * MyExprFunctor f; * Var x("x"); - * CHECK_EQ(f(x + 1, 2), 3); + * ICHECK_EQ(f(x + 1, 2), 3); * \endcode * * \note Why do we need this more powerful Functor: diff --git a/include/tvm/topi/broadcast.h b/include/tvm/topi/broadcast.h index d03ddc93b4c0..f4f4f2ccb917 100644 --- a/include/tvm/topi/broadcast.h +++ b/include/tvm/topi/broadcast.h @@ -49,17 +49,17 @@ inline tvm::te::Tensor broadcast_to(const tvm::te::Tensor& t, const tvm::Array& output_shape, std::string name = "T_broadcast_to", std::string tag = kBroadcast) { - CHECK_GE(output_shape.size(), t->shape.size()) + ICHECK_GE(output_shape.size(), t->shape.size()) << "Not a broadcast, output dimensionality smaller than input.\noutput: " << output_shape << "\nvs\ninput: " << t; auto bh = detail::BroadcastShape(output_shape, t->shape); - CHECK_EQ(output_shape.size(), bh.common_shape.size()); + ICHECK_EQ(output_shape.size(), bh.common_shape.size()); Array oshape; for (size_t i = 0; i < output_shape.size(); ++i) { if (output_shape[i].as() == nullptr) { oshape.push_back(output_shape[i]); } else { - CHECK(topi::detail::EqualCheck(output_shape[i], bh.common_shape[i])); + ICHECK(topi::detail::EqualCheck(output_shape[i], bh.common_shape[i])); oshape.push_back(bh.common_shape[i]); } } diff --git a/include/tvm/topi/cuda/dense.h b/include/tvm/topi/cuda/dense.h index 447486d2fe0d..7fd3107b6c32 100644 --- a/include/tvm/topi/cuda/dense.h +++ b/include/tvm/topi/cuda/dense.h @@ -53,10 +53,10 @@ namespace cuda { inline tvm::te::Tensor dense_cuda(const Target& target, const 
tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { - CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; - CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; + ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; + ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { - CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; + ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; @@ -64,7 +64,7 @@ inline tvm::te::Tensor dense_cuda(const Target& target, const tvm::te::Tensor& d auto out_dim = weight->shape[0]; if (target->GetLibs().count("cublas")) { - CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; + ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::cublas_matmul(data, weight, false, true); if (bias.defined()) { mm = tvm::te::compute( diff --git a/include/tvm/topi/cuda/reduction.h b/include/tvm/topi/cuda/reduction.h index acfcc76b9ade..7160419422a6 100644 --- a/include/tvm/topi/cuda/reduction.h +++ b/include/tvm/topi/cuda/reduction.h @@ -60,7 +60,7 @@ Schedule ScheduleReduce(const Target& target, Operation op, Schedule sch, } auto out_stage = sch[data_out]; - CHECK_GT(out_stage->op.as()->reduce_axis.size(), 0) + ICHECK_GT(out_stage->op.as()->reduce_axis.size(), 0) << "reduce_axis must be greater than zero"; bool all_reduce; @@ -183,7 +183,7 @@ void TraverseAfterReduce(const Target& target, Schedule s, Operation op) { * \return A schedule for the given ops. */ Schedule schedule_reduce(const Target& target, Array outs) { - CHECK_EQ(outs.size(), 1) << "outs must have size 1"; + ICHECK_EQ(outs.size(), 1) << "outs must have size 1"; Array out_ops; for (auto t : outs) { out_ops.push_back(t->op); diff --git a/include/tvm/topi/detail/broadcast.h b/include/tvm/topi/detail/broadcast.h index e719348ecf77..5c701825840c 100644 --- a/include/tvm/topi/detail/broadcast.h +++ b/include/tvm/topi/detail/broadcast.h @@ -59,7 +59,7 @@ inline BroadcastHelper BroadcastShape(const tvm::Array& shape1, bh.vars1.push_front(bh.all_vars[0]); bh.vars2.push_front(bh.all_vars[0]); } else if (topi::detail::EqualCheck(one, shape1[s1_size - i])) { - CHECK(!topi::detail::EqualCheck(one, shape2[s2_size - i])); + ICHECK(!topi::detail::EqualCheck(one, shape2[s2_size - i])); bh.common_shape.push_front(shape2[s2_size - i]); bh.vars2.push_front(bh.all_vars[0]); } else if (topi::detail::EqualCheck(one, shape2[s2_size - i])) { @@ -78,10 +78,10 @@ inline BroadcastHelper BroadcastShape(const tvm::Array& shape1, bh.vars1.push_front(bh.all_vars[0]); bh.vars2.push_front(bh.all_vars[0]); } else { - CHECK(false) << "Incompatible broadcast dims: " << shape1[s1_size - i] << " and " - << shape2[s2_size - i] - << " in: " << tvm::Array(shape1.begin(), shape1.end()) << " and " - << tvm::Array(shape2.begin(), shape2.end()); + ICHECK(false) << "Incompatible broadcast dims: " << shape1[s1_size - i] << " and " + << shape2[s2_size - i] + << " in: " << tvm::Array(shape1.begin(), shape1.end()) << " and " + << tvm::Array(shape2.begin(), shape2.end()); } } // Remaining dimensions whether on shape1 or shape2 can always be completed @@ -100,7 +100,7 @@ inline tvm::Array InputIndexFromBroadcast( const tvm::Array& ovars, const tvm::te::Tensor& T, const std::deque& my_vars, const std::deque& all_vars) { tvm::Array ivars; - CHECK_EQ(ovars.size(), all_vars.size()); + ICHECK_EQ(ovars.size(), all_vars.size()); // N^2, 
could use a map but NBD. size_t expected_dims = T->shape.size(); for (size_t i = 0; i < ovars.size(); ++i) { @@ -118,7 +118,7 @@ inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast( ivars.push_back(tvm::tir::make_zero(ovars[i].dtype())); } } - CHECK(expected_dims == ivars.size()); + ICHECK(expected_dims == ivars.size()); return ivars; } diff --git a/include/tvm/topi/detail/constant_utils.h b/include/tvm/topi/detail/constant_utils.h index 201a0da94278..412c79330ca9 100644 --- a/include/tvm/topi/detail/constant_utils.h +++ b/include/tvm/topi/detail/constant_utils.h @@ -76,7 +76,7 @@ inline std::vector<int> GetConstIntValues(Array<PrimExpr> exprs, const std::stri std::vector<int> result; if (!exprs.defined()) return result; for (auto expr : exprs) { - CHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; + ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; result.push_back(GetConstInt(expr)); } return result; } @@ -96,7 +96,7 @@ inline std::vector<int64_t> GetConstInt64Values(Array<PrimExpr> exprs, std::vector<int64_t> result; if (!exprs.defined()) return result; for (auto expr : exprs) { - CHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; + ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; result.push_back(GetConstInt(expr)); } return result; } diff --git a/include/tvm/topi/detail/extern.h b/include/tvm/topi/detail/extern.h index 48c3e18aa58e..caca1e85e520 100644 --- a/include/tvm/topi/detail/extern.h +++ b/include/tvm/topi/detail/extern.h @@ -79,7 +79,7 @@ inline Array<Tensor> make_extern(const Array<Array<PrimExpr> >& out_shapes, const std::vector<DataType>& out_types, const Array<Tensor>& inputs, FExtern fextern, std::string name, std::string tag, ::tvm::Map attrs) { - CHECK_EQ(out_shapes.size(), out_types.size()) + ICHECK_EQ(out_shapes.size(), out_types.size()) << "make_extern: out_shapes and out_types must have equal size"; Array<Buffer> input_placeholders; @@ -112,7 +112,7 @@ inline Array<Tensor> make_extern(const Array<Array<PrimExpr> >& out_shapes, * \return An expression representing the pack operation */ inline PrimExpr pack_buffer(Buffer buf) { - CHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element"; + ICHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element"; auto shape = tvm::tir::Call(DataType::Handle(), tvm::tir::builtin::tvm_stack_make_shape(), buf->shape); PrimExpr strides; diff --git a/include/tvm/topi/detail/ravel_unravel.h b/include/tvm/topi/detail/ravel_unravel.h index fc775093e632..dd7bcac09a04 100644 --- a/include/tvm/topi/detail/ravel_unravel.h +++ b/include/tvm/topi/detail/ravel_unravel.h @@ -43,8 +43,8 @@ using namespace tvm::te; * \return The index after flattening */ inline PrimExpr RavelIndex(Array<PrimExpr> indices, Array<PrimExpr> shape) { - CHECK_EQ(indices.size(), shape.size()) << "indices and shape must have equal size"; - CHECK_GT(indices.size(), 0) << "indices must not be empty"; + ICHECK_EQ(indices.size(), shape.size()) << "indices and shape must have equal size"; + ICHECK_GT(indices.size(), 0) << "indices must not be empty"; PrimExpr idx; for (size_t i = 0; i < indices.size(); ++i) { if (i == 0) {
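RavelIndex above is row-major flattening: for shape (4, 5), index (2, 3) maps to 2 * 5 + 3 = 13. A plain-integer analogue of the same computation (an illustration, not TVM API):

    #include <cassert>
    #include <vector>

    int RavelIndexInt(const std::vector<int>& indices, const std::vector<int>& shape) {
      assert(indices.size() == shape.size() && !indices.empty());  // mirrors the ICHECKs above
      int idx = 0;
      for (size_t i = 0; i < indices.size(); ++i) idx = idx * shape[i] + indices[i];
      return idx;  // RavelIndexInt({2, 3}, {4, 5}) == 13
    }
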
diff --git a/include/tvm/topi/elemwise.h b/include/tvm/topi/elemwise.h index f537c9c865df..cad72cb591f8 100644 --- a/include/tvm/topi/elemwise.h +++ b/include/tvm/topi/elemwise.h @@ -327,7 +327,7 @@ inline Tensor reinterpret(const Tensor& x, DataType type, std::string name = "te */ inline Tensor elemwise_sum(const Array<Tensor>& xs, std::string name = "T_elemwise_sum", std::string tag = kElementWise) { - CHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor."; + ICHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor."; return compute( xs[0]->shape, [&](const Array<Var>& i) { diff --git a/include/tvm/topi/nn.h b/include/tvm/topi/nn.h index d257d3cbb863..ba1be3424fcc 100644 --- a/include/tvm/topi/nn.h +++ b/include/tvm/topi/nn.h @@ -98,8 +98,8 @@ inline tvm::te::Tensor leaky_relu(const tvm::te::Tensor& t, double alpha = 0.1, inline tvm::te::Tensor prelu(const tvm::te::Tensor& x, const tvm::te::Tensor& slope, const int axis = 1, std::string name = "T_prelu", std::string tag = kBroadcast) { - CHECK((size_t)axis < x->shape.size()) << "Wrong axis (" << axis << ")value. "; - CHECK(topi::detail::GetConstInt(slope->shape[0]) == topi::detail::GetConstInt(x->shape[axis])) + ICHECK((size_t)axis < x->shape.size()) << "Wrong axis (" << axis << ")value. "; + ICHECK(topi::detail::GetConstInt(slope->shape[0]) == topi::detail::GetConstInt(x->shape[axis])) << "Wrong slope shape received."; return tvm::te::compute( @@ -162,8 +162,8 @@ inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& pad_before, arith::Analyzer analyzer; - CHECK_GE(pad_before.size(), 1); - CHECK_EQ(pad_before.size(), pad_after.size()); + ICHECK_GE(pad_before.size(), 1); + ICHECK_EQ(pad_before.size(), pad_after.size()); tvm::Array<tvm::PrimExpr> pad_before_int32; tvm::Array<tvm::PrimExpr> pad_after_int32; @@ -262,8 +262,8 @@ inline tvm::te::Tensor conv2d_nchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_conv2d_nchw", std::string tag = kConv2dNCHW) { - CHECK_EQ(4, I->shape.size()); - CHECK_EQ(4, W->shape.size()); + ICHECK_EQ(4, I->shape.size()); + ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; tvm::Array<tvm::PrimExpr> output_shape{ @@ -306,8 +306,8 @@ inline tvm::te::Tensor conv2d_hwcn(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_conv2d_hwcn", std::string tag = kConv2dHWCN) { - CHECK_EQ(4, I->shape.size()); - CHECK_EQ(4, W->shape.size()); + ICHECK_EQ(4, I->shape.size()); + ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; tvm::Array<tvm::PrimExpr> output_shape{ @@ -351,8 +351,8 @@ inline tvm::te::Tensor depthwise_conv2d_nchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int stride_w = 1, std::string name = "T_depthwise_conv2d_nchw", std::string tag = kDepthwiseConv2dNCHW) { - CHECK_EQ(4, I->shape.size()); - CHECK_EQ(4, W->shape.size()); + ICHECK_EQ(4, I->shape.size()); + ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; auto pCM = W->shape[1]; // channel_multiplier @@ -380,8 +380,8 @@ inline tvm::te::Tensor depthwise_conv2d_nhwc(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int stride_w = 1, std::string name = "T_depthwise_conv2d_nhwc", std::string tag = kDepthwiseConv2dNHWC) { - CHECK_EQ(4, I->shape.size()); - CHECK_EQ(4, W->shape.size()); + ICHECK_EQ(4, I->shape.size()); + ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[1]; auto pW = I->shape[2]; auto pCM = W->shape[1]; // channel_multiplier @@ -429,8 +429,8 @@ inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int stride_w = 1, std::string name = "T_group_conv2d_ngchw", std::string tag = kGroupConv2d) { - CHECK_EQ(5, I->shape.size()); - CHECK_EQ(5, W->shape.size()); + ICHECK_EQ(5, I->shape.size()); + ICHECK_EQ(5, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; tvm::Array<tvm::PrimExpr> output_shape{ diff --git a/include/tvm/topi/nn/bnn.h b/include/tvm/topi/nn/bnn.h index f72950861b8a..815b8a23c998 100644 --- a/include/tvm/topi/nn/bnn.h +++ b/include/tvm/topi/nn/bnn.h @@ -52,7 +52,7 @@ inline tvm::te::Tensor binarize_pack(const 
tvm::te::Tensor& data, int axis, std::string name = "PackedInput", std::string tag = "binarize_pack") { auto ishape = data->shape; - CHECK_EQ(GetConstInt(ishape[axis]) % 32, 0) + ICHECK_EQ(GetConstInt(ishape[axis]) % 32, 0) << "binarize_pack: axis size must be a multiple of 32"; arith::Analyzer analyzer; @@ -99,10 +99,10 @@ inline tvm::te::Tensor binarize_pack(const tvm::te::Tensor& data, int axis, * \return Tensor with shape [batch, out_dim], dtype is float32 */ inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight) { - CHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data"; - CHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight"; - CHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data"; - CHECK_EQ(weight->dtype, DataType::UInt(32)) << "binary_dense requires uint32 weight"; + ICHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data"; + ICHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight"; + ICHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data"; + ICHECK_EQ(weight->dtype, DataType::UInt(32)) << "binary_dense requires uint32 weight"; auto batch = data->shape[0]; auto in_dim = data->shape[1]; diff --git a/include/tvm/topi/nn/dense.h b/include/tvm/topi/nn/dense.h index ad18cb063f10..113002dc2d88 100644 --- a/include/tvm/topi/nn/dense.h +++ b/include/tvm/topi/nn/dense.h @@ -47,10 +47,10 @@ using namespace tvm::te; */ inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { - CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; - CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; + ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; + ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { - CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; + ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; diff --git a/include/tvm/topi/nn/dilate.h b/include/tvm/topi/nn/dilate.h index 9b5a8047740e..3369316e4d7f 100644 --- a/include/tvm/topi/nn/dilate.h +++ b/include/tvm/topi/nn/dilate.h @@ -45,7 +45,7 @@ using namespace tvm::te; * \return The logical conjunction expression */ PrimExpr all(Array args) { - CHECK_GT(args.size(), 0) << "all requires at least one argument"; + ICHECK_GT(args.size(), 0) << "all requires at least one argument"; PrimExpr ret = args[0]; for (size_t i = 1; i < args.size(); ++i) { @@ -70,8 +70,8 @@ PrimExpr all(Array args) { inline Tensor dilate(const Tensor& x, Array strides, double dilation_value, std::string name = "tensor", std::string tag = kInjective) { auto n = x->shape.size(); - CHECK_EQ(n, strides.size()) << "strides size (" << strides.size() - << ") must match dimension of x (" << n << ")"; + ICHECK_EQ(n, strides.size()) << "strides size (" << strides.size() + << ") must match dimension of x (" << n << ")"; Array out_shape; arith::Analyzer analyzer; diff --git a/include/tvm/topi/nn/local_response_norm.h b/include/tvm/topi/nn/local_response_norm.h index 0170c503d9ff..717adb8ff8fa 100644 --- a/include/tvm/topi/nn/local_response_norm.h +++ b/include/tvm/topi/nn/local_response_norm.h @@ -52,9 +52,9 @@ using namespace tvm::te; inline Tensor lrn(const Tensor& data, int size, int axis = 1, float alpha = 0.0001, float beta = 0.75, float bias = 2, std::string name = "tensor", std::string tag = kBroadcast) { - 
CHECK_EQ(data->shape.size(), 4) << "LRN requires 4-D input"; - CHECK_EQ(size % 2, 1) << "size should be odd number"; - CHECK(axis == 1 || axis == 3) << "axis should be 1 or 3 for NCHW and NHWC"; + ICHECK_EQ(data->shape.size(), 4) << "LRN requires 4-D input"; + ICHECK_EQ(size % 2, 1) << "size should be odd number"; + ICHECK(axis == 1 || axis == 3) << "axis should be 1 or 3 for NCHW and NHWC"; auto input_shape = data->shape; Array pad_before{0, 0, 0, 0}; Array pad_after{0, 0, 0, 0}; diff --git a/include/tvm/topi/nn/pooling.h b/include/tvm/topi/nn/pooling.h index 2396fc25c23f..882793877ed6 100644 --- a/include/tvm/topi/nn/pooling.h +++ b/include/tvm/topi/nn/pooling.h @@ -65,10 +65,10 @@ inline Tensor pool_impl(const Tensor& x, const Array& kernel_size, const Array& stride_size, const Array& padding_size, PoolType pool_type, bool ceil_mode, const size_t height_axis, const size_t width_axis, bool count_include_pad) { - CHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)"; - CHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements"; - CHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements"; - CHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements"; + ICHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)"; + ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements"; + ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements"; + ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements"; auto kernel_height = cast(DataType::DataType::Int(32), kernel_size[0]); auto kernel_width = cast(DataType::DataType::Int(32), kernel_size[1]); @@ -181,11 +181,11 @@ inline Tensor pool_grad_impl(const Tensor& out_grad, const Tensor& x, const Array& padding_size, PoolType pool_type, bool ceil_mode, const size_t height_axis, const size_t width_axis, bool count_include_pad) { - CHECK(out_grad->shape.size() >= 2) << "Pooling grad output must >= 2-D (H, W)"; - CHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)"; - CHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements"; - CHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements"; - CHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements"; + ICHECK(out_grad->shape.size() >= 2) << "Pooling grad output must >= 2-D (H, W)"; + ICHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)"; + ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements"; + ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements"; + ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements"; auto kernel_height = cast(DataType::DataType::Int(32), kernel_size[0]); auto kernel_width = cast(DataType::DataType::Int(32), kernel_size[1]); @@ -372,7 +372,7 @@ inline bool find_depth_height_width(const std::string& layout, int* depth_axis, inline bool find_height_width(const std::string& layout, int* height_axis, int* width_axis) { int dummy; - CHECK_EQ(find_depth_height_width(layout, &dummy, height_axis, width_axis), false); + ICHECK_EQ(find_depth_height_width(layout, &dummy, height_axis, width_axis), false); if (*height_axis != -1 && *width_axis != -1) { return true; } @@ -381,7 +381,7 @@ inline bool find_height_width(const std::string& layout, int* height_axis, int* inline bool find_width(const std::string& layout, int* width_axis) { int dummy; - CHECK_EQ(find_depth_height_width(layout, &dummy, &dummy, 
width_axis), false); + ICHECK_EQ(find_depth_height_width(layout, &dummy, &dummy, width_axis), false); if (*width_axis != -1) { return true; } @@ -422,7 +422,7 @@ inline Tensor pool(const Tensor& x, const Array& kernel_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCHW", bool count_include_pad = true) { int height_axis = -1, width_axis = -1; - CHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; + ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; return pool_impl(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, height_axis, width_axis, count_include_pad); } @@ -462,7 +462,7 @@ inline Tensor pool_grad(const Tensor& out_grad, const Tensor& x, const Array& output_size, PoolType pool_type, const std::vector& axes) { const auto n_dim = output_size.size(); - CHECK_EQ(axes.size(), n_dim) << "The number of axes not equal to the in/out dimension"; + ICHECK_EQ(axes.size(), n_dim) << "The number of axes not equal to the in/out dimension"; Array data_shape = x->shape; for (size_t i = 0; i < data_shape.size(); ++i) { @@ -591,7 +591,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x, const Array& output_ inline Tensor adaptive_pool(const Tensor& x, const Array& output_size, PoolType pool_type, const std::string& layout = "NCHW") { int height_axis = -1, width_axis = -1; - CHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; + ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; return adaptive_pool_impl(x, output_size, pool_type, {height_axis, width_axis}); } @@ -606,7 +606,7 @@ inline Tensor adaptive_pool(const Tensor& x, const Array& output_size, inline Tensor adaptive_pool3d(const Tensor& x, const Array& output_size, PoolType pool_type, const std::string& layout = "NCDHW") { int depth_axis = -1, height_axis = -1, width_axis = -1; - CHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) + ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) << "Unsupported layout " << layout; return adaptive_pool_impl(x, output_size, pool_type, {depth_axis, height_axis, width_axis}); } @@ -661,10 +661,10 @@ inline Tensor pool_impl_nd(const Tensor& x, const Array& kernel_size, bool count_include_pad) { int k_size = kernel_size.size(); int x_size = x->shape.size(); - CHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must have same elements as kernel"; - CHECK_EQ(padding_size.size(), k_size * 2) << "Pooling padding_size must has double elements of" - " kernel"; - CHECK_EQ(axis.size(), k_size) << "axis must have same elements as kernel"; + ICHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must have same elements as kernel"; + ICHECK_EQ(padding_size.size(), k_size * 2) << "Pooling padding_size must has double elements of" + " kernel"; + ICHECK_EQ(axis.size(), k_size) << "axis must have same elements as kernel"; Array daxis; std::vector kernel(k_size); @@ -812,7 +812,7 @@ inline Tensor pool1d(const Tensor& x, const Array& kernel_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCW", bool count_include_pad = true) { int width_axis = -1; - CHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout; + ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout; std::vector axis = {width_axis}; return pool_impl_nd(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, axis, 
count_include_pad); @@ -853,7 +853,7 @@ inline Tensor pool3d(const Tensor& x, const Array& kernel_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCDHW", bool count_include_pad = true) { int depth_axis = -1, height_axis = -1, width_axis = -1; - CHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) + ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) << "Unsupported layout " << layout; std::vector axis = {depth_axis, height_axis, width_axis}; return pool_impl_nd(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, axis, diff --git a/include/tvm/topi/nn/softmax.h b/include/tvm/topi/nn/softmax.h index 2e94f9103c68..78a9ec40bf89 100644 --- a/include/tvm/topi/nn/softmax.h +++ b/include/tvm/topi/nn/softmax.h @@ -54,7 +54,7 @@ inline Tensor softmax(const Tensor& x, int axis = -1, std::string name = "tensor if (axis < 0) { axis = ndim + axis; } - CHECK_LT(axis, ndim) << "axis parameter should be less than input dim"; + ICHECK_LT(axis, ndim) << "axis parameter should be less than input dim"; auto k1 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k1"); auto k2 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k2"); @@ -124,7 +124,7 @@ inline Tensor softmax(const Tensor& x, int axis = -1, std::string name = "tensor */ inline Tensor log_softmax(const Tensor& x, std::string name = "tensor", std::string tag = "log_softmax_output") { - CHECK_EQ(x->shape.size(), 2) << "Log softmax requires 2-D input"; + ICHECK_EQ(x->shape.size(), 2) << "Log softmax requires 2-D input"; PrimExpr m = x->shape[0]; PrimExpr n = x->shape[1]; diff --git a/include/tvm/topi/reduction.h b/include/tvm/topi/reduction.h index 75c8265a63ce..2a2f2113e9b1 100644 --- a/include/tvm/topi/reduction.h +++ b/include/tvm/topi/reduction.h @@ -75,8 +75,8 @@ inline std::vector GetRealAxis(int ndim, const Array& axis) { if (val < 0) { val += ndim; } - CHECK_LE(val, ndim) << " exceeds the maximum dimension " << ndim; - CHECK_GE(val, 0); + ICHECK_LE(val, ndim) << " exceeds the maximum dimension " << ndim; + ICHECK_GE(val, 0); real_axis.push_back(static_cast(val)); } std::sort(real_axis.begin(), real_axis.end()); @@ -181,7 +181,7 @@ inline Tensor DoCommReduce(const Tensor& data, FReduce func, const Array& axis, FReduce func, bool keepdims, bool atleast1d) { auto ndim = data->shape.size(); - CHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; + ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; auto real_axis = GetRealAxis(static_cast(ndim), axis); auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d); return DoCommReduce(data, func, target_shape, real_axis, @@ -204,7 +204,7 @@ inline Tensor CommReduce(const Tensor& data, const Array& axis, FReduce inline Tensor CommReduceIdx(const Tensor& data, const Array& axis, FCommReduce func, bool keepdims, bool atleast1d) { auto ndim = data->shape.size(); - CHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; + ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; auto real_axis = GetRealAxis(static_cast(ndim), axis); auto reduce_axes = MakeReduceAxes(real_axis, data); auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d); @@ -325,7 +325,7 @@ inline Tensor sum(const Tensor& data, const Array& axis, bool keepdims } inline Tensor collapse_sum(const Tensor& data, Array target_shape) { - CHECK_GE(data->shape.size(), target_shape.size()); + ICHECK_GE(data->shape.size(), target_shape.size()); auto ishape = detail::GetConstIntValues(data->shape, "ishape"); auto 
oshape = detail::GetConstIntValues(target_shape, "oshape"); diff --git a/include/tvm/topi/rocm/dense.h b/include/tvm/topi/rocm/dense.h index a1e4d14b9719..b861e6c89a67 100644 --- a/include/tvm/topi/rocm/dense.h +++ b/include/tvm/topi/rocm/dense.h @@ -53,10 +53,10 @@ namespace rocm { inline tvm::te::Tensor dense_rocm(const Target& target, const tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { - CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; - CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; + ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; + ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { - CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; + ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; @@ -64,7 +64,7 @@ inline tvm::te::Tensor dense_rocm(const Target& target, const tvm::te::Tensor& d auto out_dim = weight->shape[0]; if (target->GetLibs().count("rocblas")) { - CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; + ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::rocblas_matmul(data, weight, false, true); if (bias.defined()) { mm = tvm::te::compute( diff --git a/include/tvm/topi/transform.h b/include/tvm/topi/transform.h index aa5c6d2a2256..fa27faf18f15 100644 --- a/include/tvm/topi/transform.h +++ b/include/tvm/topi/transform.h @@ -60,11 +60,11 @@ using namespace topi::detail; inline Tensor expand_dims(const Tensor& x, int axis, int num_newaxis = 1, std::string name = "T_expand_dims", std::string tag = kBroadcast) { int ndim = static_cast(x->shape.size()); - CHECK(-ndim - 1 <= axis && axis <= ndim) + ICHECK(-ndim - 1 <= axis && axis <= ndim) << "expand_dims only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; - CHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`" - << ", but got num_newaxis = " << num_newaxis; + ICHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`" + << ", but got num_newaxis = " << num_newaxis; if (axis < 0) { // Calculate offset from last dimension axis = ndim + axis + 1; @@ -123,13 +123,13 @@ inline Tensor transpose(const Tensor& x, Array axes, std::string name = new_axis = static_cast(x->shape.size()) + axis; axes.Set(i, new_axis); } - CHECK((new_axis >= 0) && (new_axis < static_cast(x->shape.size()))) + ICHECK((new_axis >= 0) && (new_axis < static_cast(x->shape.size()))) << "axis=" << axis << " is invalid for the " << static_cast(x->shape.size()) << "-dimensional input tensor"; for (size_t j = 0; j < axes.size(); ++j) { if (i != j) { - CHECK(new_axis != static_cast(axes[j]->value)) << "repeated axis in transpose"; + ICHECK(new_axis != static_cast(axes[j]->value)) << "repeated axis in transpose"; } } new_shape.push_back(x->shape[new_axis]); @@ -178,14 +178,14 @@ inline Tensor reverse_sequence(const Tensor& x, const Tensor& seq_lengths, int s batch_axis = static_cast(x->shape.size()) + batch_axis; } - CHECK(seq_lengths_dim == 1) << "seq_lengths should be 1D vector"; + ICHECK(seq_lengths_dim == 1) << "seq_lengths should be 1D vector"; - CHECK(GetConstInt(seq_lengths->shape[0]) == GetConstInt(x->shape[batch_axis])) + ICHECK(GetConstInt(seq_lengths->shape[0]) == GetConstInt(x->shape[batch_axis])) << "For reverse_sequence seq_lengths size should match with dimension of batch axis" << ", but got dimension 
of batch_axis = " << GetConstInt(x->shape[batch_axis]) << ", and seq_length size = " << GetConstInt(seq_lengths->shape[0]); - CHECK((0 <= batch_axis) && (batch_axis < static_cast(x->shape.size()))) + ICHECK((0 <= batch_axis) && (batch_axis < static_cast(x->shape.size()))) << "batch_axis=" << batch_axis_inp << " is invalid for the " << static_cast(x->shape.size()) << "-dimensional input tensor"; } @@ -193,7 +193,7 @@ inline Tensor reverse_sequence(const Tensor& x, const Tensor& seq_lengths, int s if (seq_axis < 0) { seq_axis = static_cast(x->shape.size()) + seq_axis; } - CHECK((0 <= seq_axis) && (seq_axis < static_cast(x->shape.size()))) + ICHECK((0 <= seq_axis) && (seq_axis < static_cast(x->shape.size()))) << "seq_axis=" << seq_axis_inp << " is invalid for the " << static_cast(x->shape.size()) << "-dimensional input tensor"; @@ -332,7 +332,7 @@ inline Tensor squeeze(const Tensor& x, Array axis, bool atleast1d = fal if (val < 0) { val += static_cast(x->shape.size()); } - CHECK_EQ(GetConstInt(x->shape[val]), 1) << "Dimension " << val << " must have size 1"; + ICHECK_EQ(GetConstInt(x->shape[val]), 1) << "Dimension " << val << " must have size 1"; axis_val.push_back(val); } } @@ -380,12 +380,12 @@ inline Tensor squeeze(const Tensor& x, Array axis, bool atleast1d = fal inline Tensor concatenate(const Array& inputs, int axis = 0, std::string name = "T_concat", std::string tag = kInjective) { int ndim = static_cast(inputs[0]->shape.size()); - CHECK(-ndim <= axis && axis < ndim) << "concatenate only accepts `axis` in [-ndim, ndim)" - << ", but got axis = " << axis << ", and ndim = " << ndim; + ICHECK(-ndim <= axis && axis < ndim) << "concatenate only accepts `axis` in [-ndim, ndim)" + << ", but got axis = " << axis << ", and ndim = " << ndim; if (axis < 0) { axis += ndim; } - CHECK_LT(axis, inputs[0]->shape.size()) << "axis out of bounds"; + ICHECK_LT(axis, inputs[0]->shape.size()) << "axis out of bounds"; Array axis_sizes; for (auto t : inputs) { @@ -439,13 +439,13 @@ inline Tensor concatenate(const Array& inputs, int axis = 0, std::string inline Tensor stack(const Array& inputs, int axis = 0, std::string name = "T_stack", std::string tag = kInjective) { int ndim = static_cast(inputs[0]->shape.size()); - CHECK(-ndim - 1 <= axis && axis <= ndim) + ICHECK(-ndim - 1 <= axis && axis <= ndim) << "stack only accepts `axis` in [-ndim, ndim)" << ", but got axis = " << axis << ", and ndim = " << ndim; if (axis < 0) { axis += ndim + 1; } - CHECK_LT(axis, inputs[0]->shape.size() + 1) << "axis out of bounds"; + ICHECK_LT(axis, inputs[0]->shape.size() + 1) << "axis out of bounds"; const int stack_size = static_cast(inputs.size()); Array out_shape; @@ -487,7 +487,7 @@ inline Array split(const Tensor& x, Array split_indices, int a if (axis < 0) { axis += static_cast(x->shape.size()); } - CHECK_LT(axis, x->shape.size()) << "axis out of bounds"; + ICHECK_LT(axis, x->shape.size()) << "axis out of bounds"; auto src_axis_size = x->shape[axis]; std::vector begin_ids; @@ -497,7 +497,7 @@ inline Array split(const Tensor& x, Array split_indices, int a auto idx_node = idx.as(); auto back_node = begin_ids.back().as(); if (idx_node && back_node) { - CHECK_GT(idx_node->value, back_node->value) << "split_indices must be sorted"; + ICHECK_GT(idx_node->value, back_node->value) << "split_indices must be sorted"; } begin_ids.push_back(idx); } @@ -569,7 +569,7 @@ inline Tensor strided_slice(const Tensor& x, const Array& begin, const // Consider to refactor in the future. 
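The concatenate, stack, and split checks above all follow one convention: a negative `axis` counts from the end, is normalized by adding `ndim` (or `ndim + 1` when a new axis may be created), and is then bounds-checked with ICHECK. A minimal standalone sketch of that convention, where NormalizeAxis is a hypothetical helper and a plain assert stands in for ICHECK:

#include <cassert>

// Hypothetical helper, not part of this patch; assert stands in for ICHECK.
static int NormalizeAxis(int axis, int ndim) {
  assert(-ndim <= axis && axis < ndim);  // ICHECK(...) << "axis out of bounds" in TVM proper
  return axis < 0 ? axis + ndim : axis;  // map negative axes into [0, ndim)
}
// Usage: NormalizeAxis(-1, 4) == 3; NormalizeAxis(2, 4) == 2.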
std::vector stride_vec(src_tensor_dim, 1); for (size_t i = 0; i < strides.size(); ++i) { - CHECK(strides[i].defined()); + ICHECK(strides[i].defined()); stride_vec[i] = strides[i]->value; } @@ -630,7 +630,7 @@ inline Tensor strided_slice(const Tensor& x, const Array& begin, const int interval = std::abs(end_i - begin_i); int slice_size = static_cast((interval + std::abs(stride_vec[i]) - 1) / std::abs(stride_vec[i])); - CHECK(stride_vec[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i)) + ICHECK(stride_vec[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i)) << ": Input [Begin=" << begin_vec[i] << ", End=" << end_vec[i] << "] is invalid for axis=" << i; @@ -670,14 +670,14 @@ inline Array split_sections(const Tensor& x, int num_sections, int axis, if (axis < 0) { axis += static_cast(x->shape.size()); } - CHECK_LT(axis, x->shape.size()) << "axis out of bounds"; + ICHECK_LT(axis, x->shape.size()) << "axis out of bounds"; auto src_axis_size = x->shape[axis]; - CHECK_GT(num_sections, 0) << "Slice count must be > 0"; + ICHECK_GT(num_sections, 0) << "Slice count must be > 0"; if (auto node = src_axis_size.as()) { - CHECK_EQ(node->value % num_sections, 0) + ICHECK_EQ(node->value % num_sections, 0) << "num_sections must be an integer factor of the size of axis " << axis << " (" << node->value << ")"; } @@ -756,8 +756,8 @@ inline Tensor take(const Tensor& a, const Tensor& indices, std::string mode = "c inline Tensor sequence_mask(const Tensor& data, const Tensor& valid_length, double mask_value, int axis, std::string name = "T_sequence_mask", std::string tag = kInjective) { - CHECK(axis == 0 || axis == 1) << "axis must be either 0 or 1"; - CHECK_EQ(valid_length->shape.size(), 1) << "valid_length must have ndim=1, i.e., (batch_size,)."; + ICHECK(axis == 0 || axis == 1) << "axis must be either 0 or 1"; + ICHECK_EQ(valid_length->shape.size(), 1) << "valid_length must have ndim=1, i.e., (batch_size,)."; auto length_dim = data->shape[axis]; auto batch_dim = data->shape[1 - axis]; Array out_shape = data->shape; @@ -795,8 +795,8 @@ inline Tensor take(const Tensor& a, const Tensor& indices, int axis, std::string if (axis < 0) { axis += static_cast(a->shape.size()); } - CHECK_GE(axis, 0) << "axis out of bounds"; - CHECK_LT(axis, a->shape.size()) << "axis out of bounds"; + ICHECK_GE(axis, 0) << "axis out of bounds"; + ICHECK_LT(axis, a->shape.size()) << "axis out of bounds"; auto axis_dim = a->shape[axis]; int indices_len = static_cast(indices->shape.size()); @@ -887,11 +887,11 @@ inline Tensor take(const Tensor& a, const Tensor& indices, int axis, std::string */ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y, std::string name = "T_where", std::string tag = kBroadcast) { - CHECK_EQ(x->shape.size(), y->shape.size()) + ICHECK_EQ(x->shape.size(), y->shape.size()) << "x and y must have the same shape.Got different number of dimension: " << x->shape.size() << " vs " << y->shape.size(); - CHECK_EQ(x->dtype, y->dtype) << "x and y must have the same dtype: " << x->dtype << " vs " - << y->dtype; + ICHECK_EQ(x->dtype, y->dtype) << "x and y must have the same dtype: " << x->dtype << " vs " + << y->dtype; if (x->shape.size() == 0) { return compute( @@ -908,7 +908,7 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y, }, name, tag); } else if (condition->shape.size() != 1) { - CHECK_EQ(condition->shape.size(), x->shape.size()) + ICHECK_EQ(condition->shape.size(), x->shape.size()) << "condition array must be either have the same shape as x or to be a " "1-D array.Got 
different number of dimension: " << condition->shape.size() << " vs " << x->shape.size(); @@ -922,7 +922,7 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y, int64_t cond_first_dim = topi::GetConstInt(condition->shape[0]); int64_t x_first_dim = topi::GetConstInt(x->shape[0]); if (cond_first_dim > 0 && x_first_dim > 0) { - CHECK_EQ(cond_first_dim, x_first_dim) + ICHECK_EQ(cond_first_dim, x_first_dim) << "If condition is 1-D, the first dimension must be the same as x: " << cond_first_dim << " vs " << x_first_dim; } @@ -951,11 +951,11 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y, inline Tensor repeat(const Tensor& x, int repeats, int axis, std::string name = "T_repeat", std::string tag = kBroadcast) { int ndim = static_cast(x->shape.size()); - CHECK(-ndim - 1 <= axis && axis <= ndim) + ICHECK(-ndim - 1 <= axis && axis <= ndim) << "repeat only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; - CHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" - << ", but got repeats = " << repeats; + ICHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" + << ", but got repeats = " << repeats; if (axis < 0) { // Calculate offset from last dimension axis += ndim; @@ -1091,13 +1091,13 @@ inline Tensor gather(const Tensor& data, int axis, const Tensor& indices, std::string name = "T_gather", std::string tag = kInjective) { size_t ndim_d = data->shape.size(); size_t ndim_i = indices->shape.size(); - CHECK_GE(ndim_d, 1) << "Cannot gather from a scalar."; - CHECK_EQ(ndim_d, ndim_i); - CHECK_GE(axis, 0); - CHECK_LT(axis, ndim_d); + ICHECK_GE(ndim_d, 1) << "Cannot gather from a scalar."; + ICHECK_EQ(ndim_d, ndim_i); + ICHECK_GE(axis, 0); + ICHECK_LT(axis, ndim_d); size_t indices_dim_i = static_cast(GetConstInt(indices->shape[axis])); - CHECK_GE(indices_dim_i, 1); - CHECK(indices->dtype.is_int()); + ICHECK_GE(indices_dim_i, 1); + ICHECK(indices->dtype.is_int()); Array out_shape; for (size_t i = 0; i < ndim_i; ++i) { @@ -1138,10 +1138,10 @@ inline Tensor gather_nd(const Tensor& data, const Tensor& indices, std::string n std::string tag = kInjective) { size_t ndim_d = data->shape.size(); size_t ndim_i = indices->shape.size(); - CHECK_GE(ndim_i, 1) << "indices tensor must have at least 1 dimensions"; + ICHECK_GE(ndim_i, 1) << "indices tensor must have at least 1 dimensions"; size_t indices_dim0 = static_cast(GetConstInt(indices->shape[0])); - CHECK_LE(indices_dim0, ndim_d) << "dim 0 of indices tensor must be no more " - << "than dimensions of data tensor"; + ICHECK_LE(indices_dim0, ndim_d) << "dim 0 of indices tensor must be no more " + << "than dimensions of data tensor"; Array out_shape; for (size_t i = 1; i < ndim_i; ++i) { out_shape.push_back(indices->shape[i]); @@ -1216,8 +1216,8 @@ inline tvm::te::Tensor matmul(const tvm::te::Tensor& A, const tvm::te::Tensor& B */ inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, int axes = 2, std::string name = "T_tensordot", std::string tag = kMatMul) { - CHECK_GE(A->shape.size(), axes); - CHECK_GE(B->shape.size(), axes); + ICHECK_GE(A->shape.size(), axes); + ICHECK_GE(B->shape.size(), axes); Array output_shape(A->shape.begin(), A->shape.end() + (-axes)); for (auto it = B->shape.begin() + axes; it != B->shape.end(); ++it) output_shape.push_back(*it); @@ -1262,7 +1262,7 @@ inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, int axes = 2, inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, 
Array A_axes, Array B_axes, std::string name = "T_tensordot", std::string tag = kMatMul) { - CHECK_EQ(A_axes.size(), B_axes.size()); + ICHECK_EQ(A_axes.size(), B_axes.size()); auto A_axes_val = GetConstIntValues(A_axes, "A_axes"); auto B_axes_val = GetConstIntValues(B_axes, "B_axes"); @@ -1366,11 +1366,12 @@ inline Tensor layout_transform(const Tensor& src, const std::string& src_layout, return src; } - CHECK(src_layout_struct.defined() && dst_layout_struct.defined()) + ICHECK(src_layout_struct.defined() && dst_layout_struct.defined()) << "cannot convert from/to undefined layout"; auto layout_converter = tir::BijectiveLayout(src_layout_struct, dst_layout_struct); - CHECK(layout_converter.defined()) << "cannot convert from " << src_layout << " to " << dst_layout; + ICHECK(layout_converter.defined()) + << "cannot convert from " << src_layout << " to " << dst_layout; Array dst_shape = layout_converter.ForwardShape(src->shape); @@ -1499,9 +1500,10 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array const Tensor& sparse_values, const PrimExpr& default_value, const std::string name = "T_sparse_to_dense", const std::string tag = kInjective) { - CHECK(sparse_indices->dtype.is_int()) << "sparse_indices only accepts integer values"; - CHECK_LE(sparse_indices->shape.size(), 3) << "sparse_indices tensor should be 0D, 1D, or 2D only"; - CHECK_LE(sparse_values->shape.size(), 2) << "sparse_values tensor should be 0D or 1D only"; + ICHECK(sparse_indices->dtype.is_int()) << "sparse_indices only accepts integer values"; + ICHECK_LE(sparse_indices->shape.size(), 3) + << "sparse_indices tensor should be 0D, 1D, or 2D only"; + ICHECK_LE(sparse_values->shape.size(), 2) << "sparse_values tensor should be 0D or 1D only"; const auto rank_sparse_indices = static_cast(sparse_indices->shape.size()); Array oshape; diff --git a/src/arith/analyzer.cc b/src/arith/analyzer.cc index daf61441b466..9737b53703fd 100644 --- a/src/arith/analyzer.cc +++ b/src/arith/analyzer.cc @@ -47,7 +47,7 @@ void Analyzer::Bind(const Var& var, const PrimExpr& expr, bool allow_override) { } void Analyzer::Bind(const Var& var, const Range& range, bool allow_override) { - CHECK(range.defined()); + ICHECK(range.defined()); if (tir::is_one(range->extent)) { this->Bind(var, range->min, allow_override); } else { @@ -64,7 +64,7 @@ void Analyzer::Bind(const Map& variables, bool allow_override) { } void ConstraintContext::EnterWithScope() { - CHECK(exit_ == nullptr); + ICHECK(exit_ == nullptr); // entering the scope. 
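The paired ICHECKs in ConstraintContext::EnterWithScope and ExitWithScope (this hunk and the next) guard the enter-once/exit-once protocol of a constraint scope. A minimal sketch of the same guard idea, assuming nothing TVM-specific: storing the recovery functor doubles as the "entered" flag, so double entry and exit-before-enter both trip the check.

#include <cassert>
#include <functional>
#include <utility>

class ScopeGuard {
 public:
  void Enter(std::function<void()> undo) {
    assert(exit_ == nullptr);  // ICHECK(exit_ == nullptr): forbid double entry
    exit_ = std::move(undo);
  }
  void Exit() {
    assert(exit_ != nullptr);  // ICHECK(exit_ != nullptr): Enter must come first
    exit_();                   // run the accumulated recovery actions
    exit_ = nullptr;
  }

 private:
  std::function<void()> exit_;
};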
auto f0 = analyzer_->const_int_bound.EnterConstraint(constraint_); auto f1 = analyzer_->modular_set.EnterConstraint(constraint_); @@ -78,7 +78,7 @@ void ConstraintContext::EnterWithScope() { } void ConstraintContext::ExitWithScope() { - CHECK(exit_ != nullptr); + ICHECK(exit_ != nullptr); exit_(); } diff --git a/src/arith/canonical_simplify.cc b/src/arith/canonical_simplify.cc index a88849b42e9f..d0a0702a0fb0 100644 --- a/src/arith/canonical_simplify.cc +++ b/src/arith/canonical_simplify.cc @@ -63,7 +63,7 @@ inline PrimExpr ModImpl(PrimExpr a, PrimExpr b, DivMode mode) { if (mode == kTruncDiv) { return truncmod(a, b); } else { - CHECK_EQ(mode, kFloorDiv); + ICHECK_EQ(mode, kFloorDiv); return floormod(a, b); } } @@ -72,7 +72,7 @@ inline PrimExpr DivImpl(PrimExpr a, PrimExpr b, DivMode mode) { if (mode == kTruncDiv) { return truncdiv(a, b); } else { - CHECK_EQ(mode, kFloorDiv); + ICHECK_EQ(mode, kFloorDiv); return floordiv(a, b); } } @@ -102,7 +102,7 @@ class SplitExprNode : public CanonicalExprNode { DivMode div_mode{kTruncDiv}; /*! \brief verify that this is a valid entry. */ - void Verify() const { CHECK(upper_factor == kPosInf || upper_factor % lower_factor == 0); } + void Verify() const { ICHECK(upper_factor == kPosInf || upper_factor % lower_factor == 0); } PrimExpr NormalizeWithScale(int64_t sscale) const { PrimExpr res = this->index; @@ -118,7 +118,7 @@ class SplitExprNode : public CanonicalExprNode { } sscale *= this->scale; if (sscale != 1) { - CHECK(!dtype.is_uint() || sscale > 0); + ICHECK(!dtype.is_uint() || sscale > 0); res = res * make_const(dtype, sscale); } return res; @@ -209,10 +209,10 @@ class SumExprNode : public CanonicalExprNode { * \param scale The scale to be applied. */ void DivideBy(int64_t scale) { - CHECK_EQ(this->base % scale, 0); + ICHECK_EQ(this->base % scale, 0); this->base /= scale; for (size_t i = 0; i < this->args.size(); ++i) { - CHECK_EQ(args[i]->scale % scale, 0); + ICHECK_EQ(args[i]->scale % scale, 0); args[i].CopyOnWrite()->scale /= scale; } } @@ -508,7 +508,7 @@ class CanonicalSimplifier::Impl : public RewriteSimplifier::Impl { return expr; } expr = ToSplitExpr(Normalize(expr)); - CHECK(expr->DivModeCompatibleTo(div_mode)); + ICHECK(expr->DivModeCompatibleTo(div_mode)); expr.CopyOnWrite()->div_mode = div_mode; return expr; } @@ -648,7 +648,7 @@ void CanonicalSimplifier::Impl::SeparateDivisibleParts(const SumExprNode* psum, } SplitExpr CanonicalSimplifier::Impl::SplitDivConst(SplitExpr lhs, int64_t cval, DivMode div_mode) { - CHECK_GT(cval, 0); + ICHECK_GT(cval, 0); lhs = ConvertDivMode(lhs, div_mode); // the following rule works for both floordiv and truncdiv @@ -682,8 +682,8 @@ SplitExpr CanonicalSimplifier::Impl::SplitDivConst(SplitExpr lhs, int64_t cval, } // directly return the split with cval == 1 lhs = ToSplitExpr(Normalize(lhs)); - CHECK(lhs->DivModeCompatibleTo(div_mode)); - CHECK_EQ(lhs->scale, 1); + ICHECK(lhs->DivModeCompatibleTo(div_mode)); + ICHECK_EQ(lhs->scale, 1); lhs.CopyOnWrite()->lower_factor *= cval; lhs.CopyOnWrite()->div_mode = div_mode; return lhs; @@ -803,7 +803,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const FloorDivNode* op) { } SplitExpr CanonicalSimplifier::Impl::SplitModConst(SplitExpr lhs, int64_t cval, DivMode div_mode) { - CHECK_GT(cval, 0); + ICHECK_GT(cval, 0); lhs = ConvertDivMode(lhs, div_mode); if (lhs->scale % cval == 0) { @@ -842,9 +842,9 @@ SplitExpr CanonicalSimplifier::Impl::SplitModConst(SplitExpr lhs, int64_t cval, } // Normalize the value. 
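Since SplitDivConst and SplitModConst in this file are parameterized on DivMode, a reminder of what the two modes compute, as a plain C++ worked example (not TVM API): the built-in `/` and `%` truncate toward zero, while the floor variants round toward negative infinity.

#include <cstdint>

static int64_t FloorDiv(int64_t a, int64_t b) {
  int64_t q = a / b;                              // C++ division truncates toward zero
  if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // floor adjusts when signs differ
  return q;
}
static int64_t FloorMod(int64_t a, int64_t b) { return a - FloorDiv(a, b) * b; }
// Worked example: -7 / 4 == -1 and -7 % 4 == -3 (trunc mode),
// while FloorDiv(-7, 4) == -2 and FloorMod(-7, 4) == 1 (floor mode).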
lhs = ToSplitExpr(Normalize(lhs)); - CHECK(lhs->DivModeCompatibleTo(div_mode)); - CHECK_EQ(lhs->scale, 1); - CHECK_EQ(lhs->lower_factor, 1); + ICHECK(lhs->DivModeCompatibleTo(div_mode)); + ICHECK_EQ(lhs->scale, 1); + ICHECK_EQ(lhs->lower_factor, 1); lhs.CopyOnWrite()->div_mode = div_mode; lhs.CopyOnWrite()->upper_factor = cval; return lhs; @@ -886,7 +886,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const ModNode* op) { // continue to use logic below. a = extra; psum = a.as(); - CHECK(psum != nullptr); + ICHECK(psum != nullptr); } } } @@ -948,7 +948,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const FloorModNode* op) { // continue to use logic below. a = extra; psum = a.as(); - CHECK(psum != nullptr); + ICHECK(psum != nullptr); } } // Simplify the offset constant if necessary. diff --git a/src/arith/const_fold.h b/src/arith/const_fold.h index 876d336454d8..7bc04a184633 100644 --- a/src/arith/const_fold.h +++ b/src/arith/const_fold.h @@ -150,7 +150,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { if (pa && pb) { // because division and mod can have different modes // NOTE: this assumes trunc div. - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; return IntImm(rtype, pa->value / pb->value); } if (pa) { @@ -158,7 +158,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { } if (pb) { if (pb->value == 1) return a; - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; } if (fa && fb && fb->value != 0) { return FloatImm(rtype, fa->value / fb->value); @@ -166,7 +166,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { if (fa && fa->value == 0) return a; if (fb) { if (fb->value == 1) return a; - CHECK_NE(fb->value, 0) << "Divide by zero"; + ICHECK_NE(fb->value, 0) << "Divide by zero"; } }); return PrimExpr(); @@ -177,7 +177,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); if (pa && pb) { - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; return IntImm(rtype, pa->value % pb->value); } if (pa) { @@ -185,7 +185,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { } if (pb) { if (pb->value == 1) return tir::make_zero(rtype); - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; } }); return PrimExpr(); @@ -196,7 +196,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { TVM_ARITH_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); if (pa && pb) { - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; return IntImm(rtype, arith::floordiv(pa->value, pb->value)); } if (pa) { @@ -204,7 +204,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { } if (pb) { if (pb->value == 1) return a; - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; } if (fa && fb && fb->value != 0) { return FloatImm(rtype, std::floor(fa->value / fb->value)); @@ -212,7 +212,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { if (fa && fa->value == 0) return a; if (fb) { if (fb->value == 1) return a; - CHECK_NE(fb->value, 0) << "Divide by zero"; + ICHECK_NE(fb->value, 0) << "Divide by zero"; } }); return PrimExpr(); @@ -223,7 +223,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); if (pa && pb) { - CHECK_NE(pb->value, 0) << "Divide by 
zero"; return IntImm(rtype, floormod(pa->value, pb->value)); } if (pa) { @@ -231,7 +231,7 @@ inline PrimExpr TryConstFold(PrimExpr a, PrimExpr b) { } if (pb) { if (pb->value == 1) return tir::make_zero(rtype); - CHECK_NE(pb->value, 0) << "Divide by zero"; + ICHECK_NE(pb->value, 0) << "Divide by zero"; } }); return PrimExpr(); diff --git a/src/arith/const_int_bound.cc b/src/arith/const_int_bound.cc index 876b7db188c6..f39ce4b05643 100644 --- a/src/arith/const_int_bound.cc +++ b/src/arith/const_int_bound.cc @@ -109,11 +109,11 @@ class ConstIntBoundAnalyzer::Impl if (!allow_override) { auto it = var_map_.find(var); if (it != var_map_.end()) { - CHECK(it->second == info) << "Trying to update var \'" << var << "\'" - << " with a different const bound: " - << "original=" - << ConstIntBound(it->second.min_value, it->second.max_value) - << ", new=" << ConstIntBound(info.min_value, info.max_value); + ICHECK(it->second == info) + << "Trying to update var \'" << var << "\'" + << " with a different const bound: " + << "original=" << ConstIntBound(it->second.min_value, it->second.max_value) + << ", new=" << ConstIntBound(info.min_value, info.max_value); } } var_map_[var] = info; @@ -155,7 +155,7 @@ class ConstIntBoundAnalyzer::Impl auto val = bound_->find(expr); if (val != bound_->end()) { auto everything = Everything(expr->dtype); - CHECK( + ICHECK( (val->second->min_value == res.min_value && val->second->max_value == res.max_value) || (val->second->min_value == everything.min_value && val->second->max_value == everything.max_value)) @@ -211,7 +211,7 @@ class ConstIntBoundAnalyzer::Impl Entry VisitExpr_(const DivNode* op) final { Entry a = VisitExpr(op->a); Entry b = VisitExpr(op->b); - CHECK(!b.is_const(0)) << "divide by zero"; + ICHECK(!b.is_const(0)) << "divide by zero"; return HandleDivision(a, b, op->dtype, InfAwareDiv); } @@ -230,7 +230,7 @@ class ConstIntBoundAnalyzer::Impl std::min(std::max(a.max_value, (int64_t)0), b_max_cap)); } } else { - CHECK(!b.is_const(0)) << "mod by zero"; + ICHECK(!b.is_const(0)) << "mod by zero"; // mod by negative value is rare, // and we just use the simpliest rule. return Everything(op->dtype); @@ -240,7 +240,7 @@ class ConstIntBoundAnalyzer::Impl Entry VisitExpr_(const FloorDivNode* op) final { Entry a = VisitExpr(op->a); Entry b = VisitExpr(op->b); - CHECK(!b.is_const(0)) << "floordiv by zero"; + ICHECK(!b.is_const(0)) << "floordiv by zero"; return HandleDivision(a, b, op->dtype, InfAwareFloorDiv); } @@ -258,7 +258,7 @@ class ConstIntBoundAnalyzer::Impl return MakeBound(0, b_max_cap); } } else { - CHECK(!b.is_const(0)) << "floormod by zero"; + ICHECK(!b.is_const(0)) << "floormod by zero"; // mod by negative value is rare, // and we just use the simpliest rule. return Everything(op->dtype); @@ -352,7 +352,7 @@ class ConstIntBoundAnalyzer::Impl additional_info_.insert(additional_info_.end(), info.begin(), info.end()); size_t new_size = old_size + info.size(); auto frecover = [old_size, new_size, this]() { - CHECK_EQ(additional_info_.size(), new_size); + ICHECK_EQ(additional_info_.size(), new_size); additional_info_.resize(old_size); }; return frecover; @@ -432,11 +432,11 @@ class ConstIntBoundAnalyzer::Impl */ static int64_t InfAwareAdd(int64_t x, int64_t y) { if (x == kPosInf) { - CHECK(y != kNegInf); + ICHECK(y != kNegInf); return kPosInf; } if (x == kNegInf) { - CHECK(y != kPosInf); + ICHECK(y != kPosInf); return kNegInf; } if (y == kPosInf || y == kNegInf) return y; @@ -464,7 +464,7 @@ class ConstIntBoundAnalyzer::Impl * \return the result. 
*/ static int64_t InfAwareDiv(int64_t x, int64_t y) { - CHECK_NE(y, 0); + ICHECK_NE(y, 0); if (x == kPosInf || x == kNegInf) { if (y > 0) return x; return -x; @@ -478,7 +478,7 @@ class ConstIntBoundAnalyzer::Impl * \return the result. */ static int64_t InfAwareFloorDiv(int64_t x, int64_t y) { - CHECK_NE(y, 0); + ICHECK_NE(y, 0); if (x == kPosInf || x == kNegInf) { if (y > 0) return x; return -x; diff --git a/src/arith/domain_touched.cc b/src/arith/domain_touched.cc index d59486cfcd79..3c3da5f4b99b 100644 --- a/src/arith/domain_touched.cc +++ b/src/arith/domain_touched.cc @@ -67,7 +67,7 @@ class BufferTouchedDomain final : public StmtExprVisitor { void VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == tir::attr::thread_extent) { const IterVarNode* thread_axis = op->node.as(); - CHECK(thread_axis); + ICHECK(thread_axis); const VarNode* var = thread_axis->var.get(); dom_map_[var] = IntSet::FromRange(Range(make_zero(op->value.dtype()), op->value)); StmtExprVisitor::VisitStmt_(op); diff --git a/src/arith/int_constraints.cc b/src/arith/int_constraints.cc index 56c95d0ab713..3a668c2331e7 100644 --- a/src/arith/int_constraints.cc +++ b/src/arith/int_constraints.cc @@ -43,9 +43,9 @@ Array AsConditions(const Array& variables, const Map res; // use variables to keep the order of iteration // so as to get rid of any non-determinism. - CHECK_EQ(variables.size(), bounds.size()); + ICHECK_EQ(variables.size(), bounds.size()); for (const auto v : variables) { - CHECK(bounds.count(v)); + ICHECK(bounds.count(v)); const auto& bnds = bounds[v]; PrimExpr lhs = bnds->coef * v; for (const PrimExpr& rhs : bnds->equal) { @@ -66,7 +66,7 @@ Array AsConditions(const Array& variables, const Map lower, Array equal, Array upper) { - CHECK(coef.dtype().is_int() || coef.dtype().is_uint()) + ICHECK(coef.dtype().is_int() || coef.dtype().is_uint()) << "Coefficient in IntGroupBounds must be integers"; ObjectPtr node = make_object(); node->coef = std::move(coef); @@ -178,7 +178,7 @@ Range IntGroupBounds::FindBestRange(const Map& vranges_addl) const { } if (!best_lower.defined()) { - CHECK(!best_diff_over.defined()); + ICHECK(!best_diff_over.defined()); return Range(); } return Range::FromMinExtent(best_lower, analyzer.Simplify(best_diff_over + 1)); @@ -196,7 +196,7 @@ TVM_REGISTER_GLOBAL("arith.IntGroupBounds_from_range").set_body_typed(IntGroupBo TVM_REGISTER_GLOBAL("arith.IntGroupBounds_FindBestRange") .set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK(args.size() == 1 || args.size() == 2); + ICHECK(args.size() == 1 || args.size() == 2); IntGroupBounds bounds = args[0]; if (args.size() == 1) { *ret = bounds.FindBestRange(); @@ -221,9 +221,9 @@ IntConstraints::IntConstraints(Array variables, Map ranges, if (!ranges.defined()) { ranges = Map(); } - CHECK(relations.defined()); + ICHECK(relations.defined()); for (const auto& var : variables) { - CHECK(var.dtype().is_int() || var.dtype().is_uint()) + ICHECK(var.dtype().is_int() || var.dtype().is_uint()) << "Variables in IntConstraints must be integers"; } node->variables = std::move(variables); @@ -259,7 +259,7 @@ IntConstraintsTransform::IntConstraintsTransform(IntConstraints src, IntConstrai IntConstraintsTransform IntConstraintsTransform::operator+( const IntConstraintsTransform& other) const { - CHECK(other->src.same_as(operator->()->dst)); + ICHECK(other->src.same_as(operator->()->dst)); Map dst_to_src; Map src_to_dst; diff --git a/src/arith/int_set.cc b/src/arith/int_set.cc index 9940d1f60b39..6490f67e1b1a 100644 --- a/src/arith/int_set.cc +++ 
b/src/arith/int_set.cc @@ -412,7 +412,7 @@ class IntervalSetEvaluator : public ExprFunctor { IntervalSet VisitExpr_(const OrNode* op) final { return VisitBinaryExpr_(op); } IntervalSet VisitExpr_(const RampNode* op) final { - CHECK(eval_vec_); + ICHECK(eval_vec_); IntervalSet base = Eval(op->base); PVar stride; if (stride.Match(op->stride)) { @@ -431,7 +431,7 @@ class IntervalSetEvaluator : public ExprFunctor { } IntervalSet VisitExpr_(const BroadcastNode* op) final { - CHECK(eval_vec_); + ICHECK(eval_vec_); return VisitExpr(op->value); } @@ -506,7 +506,7 @@ Range IntSet::CoverRange(Range max_range) const { IntSet temp; Analyzer analyzer; const IntervalSetNode* s_int = (*this).as(); - CHECK(s_int != nullptr); + ICHECK(s_int != nullptr); if (s_int->HasUpperBound() && s_int->HasLowerBound()) { return Range::FromMinExtent(s_int->min_value, analyzer.Simplify(s_int->max_value + 1 - s_int->min_value)); @@ -516,13 +516,13 @@ Range IntSet::CoverRange(Range max_range) const { PrimExpr IntSet::min() const { const IntervalSetNode* s_int = (*this).as(); - CHECK(s_int); + ICHECK(s_int); return s_int->min_value; } PrimExpr IntSet::max() const { const IntervalSetNode* s_int = (*this).as(); - CHECK(s_int); + ICHECK(s_int); return s_int->max_value; } @@ -584,7 +584,7 @@ SignType IntSet::GetSignType() const { } PrimExpr IntSet::PointValue() const { const IntervalSetNode* s_int = (*this).as(); - CHECK(s_int && s_int->IsSinglePoint()); + ICHECK(s_int && s_int->IsSinglePoint()); return s_int->min_value; } diff --git a/src/arith/ir_mutator_with_analyzer.cc b/src/arith/ir_mutator_with_analyzer.cc index 8fb69b31857a..7bc0d946ade7 100644 --- a/src/arith/ir_mutator_with_analyzer.cc +++ b/src/arith/ir_mutator_with_analyzer.cc @@ -96,7 +96,7 @@ Stmt IRMutatorWithAnalyzer::VisitStmt_(const IfThenElseNode* op) { Stmt IRMutatorWithAnalyzer::VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == tir::attr::thread_extent || op->attr_key == tir::attr::virtual_thread) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); analyzer_->Bind(iv->var, Range::FromMinExtent(0, op->value)); Stmt stmt = StmtExprMutator::VisitStmt_(op); return stmt; diff --git a/src/arith/ir_visitor_with_analyzer.h b/src/arith/ir_visitor_with_analyzer.h index 388720ad29c0..058abc8c7d20 100644 --- a/src/arith/ir_visitor_with_analyzer.h +++ b/src/arith/ir_visitor_with_analyzer.h @@ -44,7 +44,7 @@ class IRVisitorWithAnalyzer final : public StmtExprVisitor { void VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); analyzer_.Bind(iv->var, Range::FromMinExtent(0, op->value)); StmtExprVisitor::VisitStmt_(op); } else { diff --git a/src/arith/iter_affine_map.cc b/src/arith/iter_affine_map.cc index e56ef2a75ee1..283ffa646567 100644 --- a/src/arith/iter_affine_map.cc +++ b/src/arith/iter_affine_map.cc @@ -336,7 +336,7 @@ class IterMapRewriter : public ExprMutator { } else if (const auto* op = expr.as()) { return IterSumExpr({GetRef(op)}, make_zero(expr->dtype)); } else { - CHECK(!expr->IsInstance()); + ICHECK(!expr->IsInstance()); return IterSumExpr({}, expr); } } @@ -566,7 +566,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const MulNode* op) { MulToLhs(ret.CopyOnWrite(), b); return std::move(ret); } else { - CHECK(a->IsInstance()); + ICHECK(a->IsInstance()); IterSplitExpr ret = Downcast(std::move(a)); 
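The `analyzer_->Bind(iv->var, Range::FromMinExtent(0, op->value))` calls in the two thread-extent visitors above are what make launch bounds available to later simplification. A small sketch against the public arith API (headers and overloads recalled from the codebase; treat as illustrative rather than definitive):

#include <tvm/arith/analyzer.h>
#include <tvm/tir/op.h>
#include <tvm/tir/var.h>

void BindExample() {
  using namespace tvm;
  arith::Analyzer analyzer;
  tir::Var tx("threadIdx.x", DataType::Int(32));
  // Same call as in the mutator above: tx is now known to lie in [0, 32).
  analyzer.Bind(tx, Range::FromMinExtent(0, 32));
  PrimExpr q = analyzer.Simplify(floordiv(tx, 32));  // folds to 0 under the bound
  PrimExpr r = analyzer.Simplify(floormod(tx, 32));  // folds to tx under the bound
  (void)q;
  (void)r;
}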
ret.CopyOnWrite()->scale *= b; return std::move(ret); @@ -639,7 +639,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const FloorDivNode* op) { return FloorDiv(a, b); } } else { - CHECK(a->IsInstance()); + ICHECK(a->IsInstance()); IterSplitExpr ret = Downcast(std::move(a)); return SplitFloorDivConst(ret, b); } @@ -707,7 +707,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const FloorModNode* op) { return FloorMod(a, b); } } else { - CHECK(a->IsInstance()); + ICHECK(a->IsInstance()); IterSplitExpr ret = Downcast(std::move(a)); return SplitFloorModConst(ret, b); } diff --git a/src/arith/modular_set.cc b/src/arith/modular_set.cc index 9826769a5c65..ac176b2623a3 100644 --- a/src/arith/modular_set.cc +++ b/src/arith/modular_set.cc @@ -67,7 +67,7 @@ struct ModularSetAnalyzer::Entry { Entry() = default; Entry(int64_t coeff, int64_t base) { - CHECK_GE(coeff, 0); + ICHECK_GE(coeff, 0); this->coeff = coeff; if (coeff != 0) { base = base % coeff; @@ -93,10 +93,10 @@ class ModularSetAnalyzer::Impl : public ExprFunctorsecond == info) << "Trying to update var \'" << var << "\'" - << " with a different const bound: " - << "original=" << ModularSet(it->second.coeff, it->second.base) - << ", new=" << info; + ICHECK(it->second == info) + << "Trying to update var \'" << var << "\'" + << " with a different const bound: " + << "original=" << ModularSet(it->second.coeff, it->second.base) << ", new=" << info; } } var_map_[var] = Entry(info->coeff, info->base); @@ -165,7 +165,7 @@ class ModularSetAnalyzer::Impl : public ExprFunctor a x diff --git a/src/arith/pattern_match.h b/src/arith/pattern_match.h index 78ae446d0321..01baaa8d13a2 100644 --- a/src/arith/pattern_match.h +++ b/src/arith/pattern_match.h @@ -49,10 +49,10 @@ * arith::PVar v; * // We can match integer and Var, both of which are * // special case container of Expr - * CHECK((v * c).Match(tx * 3)); - * CHECK_EQ(c.Eval()->value, 3); + * ICHECK((v * c).Match(tx * 3)); + * ICHECK_EQ(c.Eval()->value, 3); * // cannot match c to ty - * CHECK(!(v * c).Match(tx * ty)); + * ICHECK(!(v * c).Match(tx * ty)); * * \endcode * @@ -199,7 +199,7 @@ class PVar : public Pattern> { } T Eval() const { - CHECK(filled_); + ICHECK(filled_); return value_; } diff --git a/src/arith/rewrite_simplify.cc b/src/arith/rewrite_simplify.cc index cb8ef01e7369..a58e4433dadd 100644 --- a/src/arith/rewrite_simplify.cc +++ b/src/arith/rewrite_simplify.cc @@ -109,9 +109,9 @@ void RewriteSimplifier::Impl::Update(const Var& var, const PrimExpr& info, bool if (!can_override) { auto it = var_map_.find(var); if (it != var_map_.end()) { - CHECK(ExprDeepEqual()(it->second, info)) << "Trying to update var \'" << var << "\'" - << " with a different value: " - << "original=" << it->second << ", new=" << info; + ICHECK(ExprDeepEqual()(it->second, info)) << "Trying to update var \'" << var << "\'" + << " with a different value: " + << "original=" << it->second << ", new=" << info; } } var_map_[var] = info; @@ -222,7 +222,7 @@ std::function RewriteSimplifier::Impl::EnterConstraint(const PrimExpr& c literal_constraints_.push_back(operator()(constraint)); size_t new_literal_size = literal_constraints_.size(); auto frecover = [old_literal_size, new_literal_size, this]() { - CHECK_EQ(literal_constraints_.size(), new_literal_size); + ICHECK_EQ(literal_constraints_.size(), new_literal_size); literal_constraints_.resize(old_literal_size); }; return frecover; @@ -461,8 +461,8 @@ PrimExpr RewriteSimplifier::Impl::VisitExpr_(const DivNode* op) { // x / 2.0 = x * 0.5 if (const FloatImmNode* ptr = op->b.as()) { - 
CHECK(op->dtype.is_float() || - datatype::Registry::Global()->GetTypeRegistered(op->dtype.code())); + ICHECK(op->dtype.is_float() || + datatype::Registry::Global()->GetTypeRegistered(op->dtype.code())); return op->a * make_const(op->b.dtype(), 1.0 / ptr->value); } diff --git a/src/arith/solve_linear_equation.cc b/src/arith/solve_linear_equation.cc index cda1ec230cbc..22bf7360563d 100644 --- a/src/arith/solve_linear_equation.cc +++ b/src/arith/solve_linear_equation.cc @@ -42,8 +42,8 @@ void SmithNormalFormDiag(std::vector>* S, std::vectorempty() || V->empty()) return; size_t m = S->size(); size_t n = (*S)[0].size(); // n is # of variables - CHECK_EQ(V->size(), n); - CHECK_EQ((*V)[0].size(), n); + ICHECK_EQ(V->size(), n); + ICHECK_EQ((*V)[0].size(), n); for (size_t index = 0; index < std::min(m, n); ++index) { // Here A is partially diagonalized, that is A[i, j] is zero for all i, j diff --git a/src/arith/solve_linear_inequality.cc b/src/arith/solve_linear_inequality.cc index eec916ac6c22..f4de9ffb197b 100644 --- a/src/arith/solve_linear_inequality.cc +++ b/src/arith/solve_linear_inequality.cc @@ -268,7 +268,7 @@ PartialSolvedInequalities SolveLinearInequalities(const IntConstraints& system_t Map res_bounds; for (const Var& v : system_to_solve->variables) { - CHECK(!res_bounds.count(v)) + ICHECK(!res_bounds.count(v)) << "Variable " << v << " appears more than one time in the `variables` which might be a bug"; @@ -436,7 +436,7 @@ IntConstraints SolveInequalitiesToRange(const IntConstraints& inequalities) { analyzer.Bind(vranges); const Var& var = *it; - CHECK(solved_bounds.count(var)); + ICHECK(solved_bounds.count(var)); auto bnd = solved_bounds[var]; if (is_one(bnd->coef) && !bnd->equal.empty()) { // There is an equation of the form `v == expr`, so this variable can be completely removed. 
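That completes the src/arith/ portion of the migration. For readers skimming this patch: ICHECK is the internal-invariant flavor of CHECK, so a failure signals a TVM bug rather than bad user input, and its message can point people at the bug tracker instead of their own code. A minimal stand-in showing the intended behavior (illustrative only; the real macro is stream-based so callers can append context with <<, as every hunk here shows):

#include <cstdlib>
#include <iostream>

// Abort with an "internal error" message when an invariant fails.
inline void MiniICheck(bool cond, const char* what) {
  if (!cond) {
    std::cerr << "InternalError: Check failed: " << what
              << " (an internal invariant was violated; please report a bug)" << std::endl;
    std::abort();
  }
}
// e.g. MiniICheck(pb_value != 0, "pb->value != 0");  // mirrors the divide-by-zero guards above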
diff --git a/src/auto_scheduler/compute_dag.cc b/src/auto_scheduler/compute_dag.cc index 75fd27ef9fa8..c6cf094ee202 100755 --- a/src/auto_scheduler/compute_dag.cc +++ b/src/auto_scheduler/compute_dag.cc @@ -553,7 +553,7 @@ class FlopEstimator : public ExprFunctor { if (pop->attrs.count("FLOP")) { // Use user-provided FLOP auto pint = pop->attrs["FLOP"].as(); - CHECK(pint != nullptr); + ICHECK(pint != nullptr); ret += pint->value; } else { // Estimate by parsing the compute body @@ -719,11 +719,11 @@ class IndexRewriter : public StmtExprMutator { for (const auto& arg : op->indices) { std::string axis_name; if (const auto* int_imm = arg.as()) { - CHECK_EQ(int_imm->value, 0); + ICHECK_EQ(int_imm->value, 0); axis_name = "IntImm"; } else { axis_name = AxisBaseName(CleanName(Downcast(arg)->name_hint)); - CHECK_EQ(name_to_arg.count(axis_name), 0); + ICHECK_EQ(name_to_arg.count(axis_name), 0); name_to_arg[axis_name] = arg; } } @@ -733,7 +733,7 @@ class IndexRewriter : public StmtExprMutator { for (int i = new_names_.size() - 1; i >= 0; --i) { auto ori_iter_name = new_names_[i]; auto name_it = name_to_arg.find(ori_iter_name); - CHECK(name_it != name_to_arg.end()); + ICHECK(name_it != name_to_arg.end()); PrimExpr ori_arg = name_it->second; PrimExpr mod_factor = new_shape_[i]; @@ -772,12 +772,12 @@ std::string GetOrigLayout(std::set* placeholder_axis_names, const t std::ostringstream os; uint32_t i = 0; const auto& placeholder_op = placeholder->op; - CHECK_GT(extractor.read_access.count(placeholder_op), 0); + ICHECK_GT(extractor.read_access.count(placeholder_op), 0); for (const auto& ev : extractor.read_access[placeholder_op]) { for (const auto& e : ev) { std::string axis_name; if (const auto* int_imm = e.as()) { - CHECK_EQ(int_imm->value, 0); + ICHECK_EQ(int_imm->value, 0); axis_name = "IntImm"; } else { axis_name = AxisBaseName(CleanName(Downcast(e)->name_hint)); @@ -788,7 +788,7 @@ std::string GetOrigLayout(std::set* placeholder_axis_names, const t } } - CHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size()); + ICHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size()); std::string orig_layout = os.str(); os.str(""); // TODO(minmin): uncomment this line for relay integration @@ -837,7 +837,7 @@ std::string GetNewLayout(Array* new_shape, const State& state, const i ExtractOriginalIterators(iter->name, &ori_iter_names); // fused iters have been replaced with iter->orig_iters. // So there should be only one ori iter name extracted from iter->name. 
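The FLOP-attribute read earlier in this file is the canonical "as<T>() then ICHECK" downcast: as<T>() returns nullptr on a type mismatch, and the ICHECK turns that silent nullptr into a loud internal error. The same shape as a standalone sketch (public IR types; the throw stands in for ICHECK):

#include <tvm/ir/expr.h>
#include <cstdint>
#include <stdexcept>

int64_t ReadIntAttr(const tvm::ObjectRef& attr) {
  const tvm::IntImmNode* pint = attr.as<tvm::IntImmNode>();
  // TVM proper writes ICHECK(pint != nullptr); a throw stands in here.
  if (pint == nullptr) throw std::runtime_error("expected an IntImm attribute");
  return pint->value;
}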
- CHECK_EQ(ori_iter_names.size(), 1); + ICHECK_EQ(ori_iter_names.size(), 1); auto ori_iter_name = AxisBaseName(*ori_iter_names.begin()); new_axis_names.push_back(ori_iter_name); } @@ -937,7 +937,7 @@ void ComputeDAG::RewriteLayout(const Array& transform_steps) { new_body.push_back(index_rewriter.Rewrite(body)); } old_compute_op = op; - CHECK(!new_compute_op.defined()); + ICHECK(!new_compute_op.defined()); new_compute_op = te::ComputeOp(pop->name, pop->tag, pop->attrs, pop->axis, new_body); } } @@ -1109,7 +1109,7 @@ String ComputeDAG::PrintStepsAsPython(const Array& transform_steps) const } State ComputeDAG::InferBound(const State& state) const { - CHECK(state->concrete) << "Only concrete state can be processed to get bound info."; + ICHECK(state->concrete) << "Only concrete state can be processed to get bound info."; State ret_state; StateNode* pstate; @@ -1267,7 +1267,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) ss << ".v" << k; } if (auto preduce = pop->body[k].as()) { - CHECK_LT(k, preduce->combiner->result.size()); + ICHECK_LT(k, preduce->combiner->result.size()); PrimExpr combiner = preduce->combiner->result[k]; if (combiner->IsInstance()) { ss << " += " << preduce->source[0] << "\n"; @@ -1300,7 +1300,7 @@ TVM_REGISTER_GLOBAL("auto_scheduler.ComputeDAG") if (tensors) { return ComputeDAG(tensors.value()); } - CHECK(sch) << "Both tensors and schedule are null"; + ICHECK(sch) << "Both tensors and schedule are null"; return ComputeDAG(sch.value()); }); diff --git a/src/auto_scheduler/cost_model.cc b/src/auto_scheduler/cost_model.cc index 3d540c7b6610..4ed5ca2bfbe8 100755 --- a/src/auto_scheduler/cost_model.cc +++ b/src/auto_scheduler/cost_model.cc @@ -34,7 +34,7 @@ TVM_REGISTER_OBJECT_TYPE(PythonBasedModelNode); RandomModel::RandomModel() { ObjectPtr node = make_object(); const auto* f = runtime::Registry::Get("auto_scheduler.cost_model.random_fill_float"); - CHECK(f != nullptr); + ICHECK(f != nullptr); node->random_number_func = reinterpret_cast*>(f); data_ = std::move(node); } @@ -109,7 +109,7 @@ void PythonBasedModelNode::PredictStages(const SearchTask& task, const Array(flatten_scores[idx++]); @@ -134,7 +134,7 @@ void PythonBasedModelNode::PredictStages(const SearchTask& task, const Arraypush_back(std::move(scores)); } idx += s_length; diff --git a/src/auto_scheduler/feature.cc b/src/auto_scheduler/feature.cc index 15066a98e2bc..8d17c4bba10f 100755 --- a/src/auto_scheduler/feature.cc +++ b/src/auto_scheduler/feature.cc @@ -298,7 +298,7 @@ class MathOpCounter : public StmtExprVisitor { void VisitExpr_(const CallNode* op) final { auto* pop = op->op.as(); - CHECK(pop != nullptr); + ICHECK(pop != nullptr); auto effect_kind = op_call_effect_[GetRef(pop)]; bool is_pure = effect_kind == CallEffectKind::kPure || effect_kind == CallEffectKind::kExprAnnotation; @@ -937,7 +937,7 @@ class PerStoreFeatureExtractor : public StmtExprVisitor { while (compute_ops_list[pt] < cur_compute_ops - 1e-4) { pt++; } - CHECK_LT(pt, compute_ops_list.size()); + ICHECK_LT(pt, compute_ops_list.size()); float value; if (pt == 0) { @@ -1323,7 +1323,7 @@ void GetPerStoreFeaturesWorkerFunc(const SearchTask& task, const State& state, i tir::transform::Sequential(Array{tir::transform::Simplify()}); mod = optimize(std::move(mod)); const auto& it = mod->functions.find(global_var); - CHECK(it != mod->functions.end()); + ICHECK(it != mod->functions.end()); const auto& prim_func = (*it).second.as(); GetPerStoreFeature(prim_func->body, task->hardware_params->cache_line_bytes, max_n_bufs, feature); @@ -1389,7 +1389,7 @@ void 
GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int const auto* workload_key_to_tensors = tvm::runtime::Registry::Get("auto_scheduler.workload_key_to_tensors"); - CHECK(workload_key_to_tensors != nullptr); + ICHECK(workload_key_to_tensors != nullptr); // read from file RecordReader reader(filename); @@ -1454,7 +1454,7 @@ void GetPerStoreFeaturesFromMeasurePairs(const Array& inputs, const auto* workload_key_to_tensors = tvm::runtime::Registry::Get("auto_scheduler.workload_key_to_tensors"); - CHECK(workload_key_to_tensors != nullptr); + ICHECK(workload_key_to_tensors != nullptr); tasks.reserve(inputs.size()); normalized_throughputs->reserve(inputs.size()); @@ -1548,7 +1548,7 @@ TVMByteArray SerializeFeatures(std::vector>&& features, size_vector.push_back(static_cast(task_ids.size())); total_bytes += sizeof(int) * task_ids.size(); - CHECK_EQ(size_vector.size(), size_vector_size); + ICHECK_EQ(size_vector.size(), size_vector_size); // allocate memory out_data->reserve(total_bytes); @@ -1574,7 +1574,7 @@ TVMByteArray SerializeFeatures(std::vector>&& features, memmove(ptr, reinterpret_cast(task_ids.data()), task_ids.size() * sizeof(int)); ptr += task_ids.size() * sizeof(int); - CHECK_EQ(ptr - out_data->data(), total_bytes); + ICHECK_EQ(ptr - out_data->data(), total_bytes); return TVMByteArray{out_data->data(), total_bytes}; } diff --git a/src/auto_scheduler/loop_state.cc b/src/auto_scheduler/loop_state.cc index c3c764fc8e2b..23d6eb64da6c 100755 --- a/src/auto_scheduler/loop_state.cc +++ b/src/auto_scheduler/loop_state.cc @@ -114,7 +114,7 @@ void AttachMap::DeleteStage(int stage_id) { void AttachMap::UpdateIters(const std::vector& original_iters, const std::vector& new_iters) { - CHECK_EQ(original_iters.size(), new_iters.size()); + ICHECK_EQ(original_iters.size(), new_iters.size()); AttachMapNode* pnode = CopyOnWrite(); std::unordered_map> new_iter_to_attached_stages; for (size_t i = 0; i < original_iters.size(); ++i) { @@ -265,8 +265,8 @@ void State::pragma(int stage_id, const Iterator& it, const String& pragma_type) void State::reorder(int stage_id, const Array& order) { const Stage& stage = operator->()->stages[stage_id]; - CHECK_EQ(order.size(), stage->iters.size()) << "The order of all iterators " - << "should be specified"; + ICHECK_EQ(order.size(), stage->iters.size()) << "The order of all iterators " + << "should be specified"; Array after_ids; GetIndices(stage->iters, order, &after_ids); ReorderStep step = ReorderStep(stage_id, after_ids); diff --git a/src/auto_scheduler/measure.cc b/src/auto_scheduler/measure.cc index c3ee6a1495e3..6c5c10e5aaee 100755 --- a/src/auto_scheduler/measure.cc +++ b/src/auto_scheduler/measure.cc @@ -303,7 +303,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) auto old_config = p->stream.precision(4); for (size_t i = 0; i < node->costs.size(); ++i) { auto pf = node->costs[i].as(); - CHECK(pf != nullptr); + ICHECK(pf != nullptr); p->stream << pf->value; if (i != node->costs.size() - 1) { p->stream << ","; diff --git a/src/auto_scheduler/measure_record.cc b/src/auto_scheduler/measure_record.cc index 99c01b17e78e..66f521e17e80 100755 --- a/src/auto_scheduler/measure_record.cc +++ b/src/auto_scheduler/measure_record.cc @@ -53,7 +53,7 @@ struct Handler<::tvm::Array<::tvm::auto_scheduler::Stage>> { bool s; reader->BeginArray(); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); } }; @@ -80,7 +80,7 @@ struct Handler<::tvm::Array<::tvm::auto_scheduler::Step>> { reader->BeginArray(); 
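The handler pattern in this file deserves a gloss: every record is a fixed-arity JSON array, NextArrayItem() reports whether another element exists, so ICHECK(s) asserts that an expected field is present and ICHECK(!s) asserts that the record ends exactly where it should. A sketch of the protocol against dmlc::JSONReader, using the same calls the handlers here use (signatures from memory of dmlc-core; treat as illustrative):

#include <dmlc/json.h>
#include <cstdlib>
#include <string>

void ReadPair(dmlc::JSONReader* reader, std::string* key, double* value) {
  reader->BeginArray();
  bool s = reader->NextArrayItem();
  if (!s) std::abort();  // ICHECK(s): first field must exist
  reader->Read(key);
  s = reader->NextArrayItem();
  if (!s) std::abort();  // ICHECK(s): second field must exist
  reader->Read(value);
  s = reader->NextArrayItem();
  if (s) std::abort();   // ICHECK(!s): the record must end here
}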
data->push_back(::tvm::auto_scheduler::StepReadFromRecord(reader)); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); } } }; @@ -97,13 +97,13 @@ struct Handler<::tvm::auto_scheduler::StateNode> { bool s; reader->BeginArray(); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&data->stages); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&data->transform_steps); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); } }; @@ -121,15 +121,15 @@ struct Handler<::tvm::auto_scheduler::SearchTaskNode> { std::string str_value; reader->BeginArray(); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&str_value); data->workload_key = std::move(str_value); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&str_value); data->target = ::tvm::Target(str_value); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); } }; @@ -150,13 +150,13 @@ struct Handler<::tvm::auto_scheduler::MeasureInputNode> { bool s; reader->BeginArray(); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(task_node.get()); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(state_node.get()); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); data->task = ::tvm::auto_scheduler::SearchTask(task_node); data->state = ::tvm::auto_scheduler::State(state_node); @@ -172,7 +172,7 @@ struct Handler<::tvm::auto_scheduler::MeasureResultNode> { writer->BeginArray(false); for (const auto& x : data.costs) { auto pf = x.as<::tvm::tir::FloatImmNode>(); - CHECK(pf != nullptr) << "Cost can only contain float values"; + ICHECK(pf != nullptr) << "Cost can only contain float values"; writer->WriteArrayItem(pf->value); } writer->EndArray(); @@ -187,23 +187,23 @@ struct Handler<::tvm::auto_scheduler::MeasureResultNode> { bool s; reader->BeginArray(); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&double_list); data->costs.clear(); for (const auto& i : double_list) { data->costs.push_back(::tvm::FloatImm(::tvm::DataType::Float(64), i)); } s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&data->error_no); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&data->all_cost); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&data->timestamp); s = reader->NextArrayItem(); - CHECK(!s); + ICHECK(!s); } }; diff --git a/src/auto_scheduler/search_policy/empty_policy.cc b/src/auto_scheduler/search_policy/empty_policy.cc index fba1ac2f42f8..79f98793d848 100644 --- a/src/auto_scheduler/search_policy/empty_policy.cc +++ b/src/auto_scheduler/search_policy/empty_policy.cc @@ -57,7 +57,7 @@ State EmptyPolicyNode::Search(int num_measure_trials, int early_stopping, // Measure is disabled if num_measure_trials <= 1 if (num_measure_trials <= 1) { const auto& res = SearchOneRound(); - CHECK_GT(res.size(), 0); + ICHECK_GT(res.size(), 0); return res[0]; } else { diff --git a/src/auto_scheduler/search_policy/search_policy.cc b/src/auto_scheduler/search_policy/search_policy.cc index 8b6d22bb2725..702eec087668 100644 --- a/src/auto_scheduler/search_policy/search_policy.cc +++ b/src/auto_scheduler/search_policy/search_policy.cc @@ -39,7 +39,7 @@ void SearchPolicyNode::PreloadMeasuredStates(const String& log_file) { RecordReader reader = RecordReader(log_file); const auto& res = reader->ReadLines(-1); size_t log_size = res.first.size(); - CHECK_EQ(log_size, res.second.size()); + ICHECK_EQ(log_size, res.second.size()); if (log_size) { Array measured_states; std::vector measured_throughputs; diff 
--git a/src/auto_scheduler/search_policy/sketch_policy.cc b/src/auto_scheduler/search_policy/sketch_policy.cc index 60178b342e62..5d6d1d28be1c 100644 --- a/src/auto_scheduler/search_policy/sketch_policy.cc +++ b/src/auto_scheduler/search_policy/sketch_policy.cc @@ -147,7 +147,7 @@ State SketchPolicyNode::Search(int n_trials, int early_stopping, int num_measure if (n_trials <= 1) { // No measurement is allowed const Array& best_states = SearchOneRound(0); - CHECK_GT(best_states.size(), 0); + ICHECK_GT(best_states.size(), 0); return best_states[0]; } else { int num_random = @@ -348,10 +348,10 @@ Array SketchPolicyNode::GenerateSketches() { auto pstate = state.CopyOnWrite(); for (size_t step_id = 0; step_id < pstate->transform_steps.size(); ++step_id) { if (pstate->transform_steps[step_id]->IsInstance()) { - CHECK_GE(step_id, 1); + ICHECK_GE(step_id, 1); int split_step_id = static_cast(step_id - 1); auto step = pstate->transform_steps[split_step_id].as(); - CHECK(step != nullptr); + ICHECK(step != nullptr); pstate->transform_steps.Set( split_step_id, SplitStep(step->stage_id, step->iter_id, step->extent, {NullOpt}, step->inner_to_outer)); diff --git a/src/auto_scheduler/search_policy/sketch_policy_rules.cc b/src/auto_scheduler/search_policy/sketch_policy_rules.cc index 1b965c9886a1..1b6cc06a4c45 100644 --- a/src/auto_scheduler/search_policy/sketch_policy_rules.cc +++ b/src/auto_scheduler/search_policy/sketch_policy_rules.cc @@ -115,7 +115,8 @@ SketchGenerationRule::ConditionKind RuleMultiLevelTilingWithFusion::MeetConditio std::vector> RuleMultiLevelTilingWithFusion::Apply( const SketchPolicyNode& policy, const State& state, int stage_id) const { int target_stage_id; - CHECK(HasSingleElementwiseMatchedConsumer(policy.search_task, state, stage_id, &target_stage_id)); + ICHECK( + HasSingleElementwiseMatchedConsumer(policy.search_task, state, stage_id, &target_stage_id)); const std::string& multi_level_tiling_structure = IsGPUTask(policy.search_task) ? GetStringParam(policy.params, SketchParamKey::MultiLevelTiling::gpu_structure) @@ -296,7 +297,7 @@ std::vector> RuleSimplifyComputeWithConstTensor::Apply( unrolled_inner_iters.push_back(tmp_s.unroll(stage_id, iter)); } else { // tile other space indices - CHECK(iter->iter_kind == IteratorKind::kSpatial); + ICHECK(iter->iter_kind == IteratorKind::kSpatial); tiled_outer_iters.push_back( tmp_s.split(stage_id, iter, Array>(tile_level - 1, NullOpt))); } @@ -319,7 +320,7 @@ std::vector> RuleSimplifyComputeWithConstTensor::Apply( SketchGenerationRule::ConditionKind RuleCrossThreadReduction::MeetCondition( const SketchPolicyNode& policy, const State& state, int stage_id) const { - CHECK(IsGPUTask(policy.search_task)); + ICHECK(IsGPUTask(policy.search_task)); // If it is an intermediate state created by RuleAddCacheWrite, // we just skip it. @@ -386,14 +387,14 @@ std::vector> RuleCrossThreadReduction::Apply(const SketchP // If the target stage does not have split step, // it must be a simple stage without reduce iters. // We then should do a split for it. 
- CHECK(!HasReduceIter(target_stage)); + ICHECK(!HasReduceIter(target_stage)); const auto& split_res = tmp_s.split(target_stage_id, target_stage->iters.back(), {Integer(task->hardware_params->warp_size)}); tmp_s.bind(target_stage_id, split_res[1], IteratorAnnotation::kThreadX); split_step_ids.push_back(tmp_s->transform_steps.size() - 2); } - CHECK_EQ(split_step_ids.size(), 1); + ICHECK_EQ(split_step_ids.size(), 1); const Iterator& target_iter = tmp_s->stages[target_stage_id]->iters[num_common_outer - 1]; const auto& split_res = tmp_s.follow_split(stage_id, fused_reduce_iter, split_step_ids[0], 1); @@ -429,13 +430,13 @@ std::vector> RuleSpecialComputeLocationGPU::Apply( const SketchPolicyNode& policy, const State& state, int stage_id) const { State tmp_s = state; const std::set& consumers = GetConsumers(policy.search_task, state, stage_id); - CHECK_EQ(consumers.size(), 1); + ICHECK_EQ(consumers.size(), 1); // Get the last outer space iterator that is not unrolled. const Stage& target_stage = state->stages[*consumers.begin()]; for (size_t i = 0; i < target_stage->iters.size(); ++i) { if (target_stage->iters[i]->annotation == IteratorAnnotation::kUnroll) { - CHECK_GT(i, 0); + ICHECK_GT(i, 0); tmp_s.compute_at(stage_id, *consumers.begin(), target_stage->iters[i - 1]); break; @@ -467,7 +468,7 @@ PopulationGenerationRule::ResultKind InitFillTileSize::Apply(SketchPolicyNode* p continue; } - CHECK(ps->extent); + ICHECK(ps->extent); int extent = GetIntImm(ps->extent.value()); const auto& candidate_lens = policy->split_memo.GetFactorizationSchemes( extent, ps->lengths.size(), max_innermost_split_factor); @@ -720,10 +721,10 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol } else if (stage->compute_at != ComputeAtKind::kIter) { // This stage is not multi-level tiled, // so it must be produced by RuleCrossThreadReduction. - CHECK(HasCrossThreadReduction(*state, stage_id)); + ICHECK(HasCrossThreadReduction(*state, stage_id)); } else { const auto res = (*state)->attach_map->stage_to_attach_iter.find(stage_id); - CHECK(res != (*state)->attach_map->stage_to_attach_iter.end()); + ICHECK(res != (*state)->attach_map->stage_to_attach_iter.end()); multi_level_tiling_root_set.insert(res->second.first); } } @@ -782,9 +783,9 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol std::vector to_fuse; int total_space_extent = 1; for (const auto& i : pop->root_iter_vars()) { - CHECK(i->dom.defined()); + ICHECK(i->dom.defined()); const auto& pint = i->dom->extent.as(); - CHECK(pint); + ICHECK(pint); total_space_extent *= pint->value; } @@ -847,7 +848,7 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol // Do cooperative fetching for the cache read stage. 
// Get spatial_split_step_ids from the root stage const auto& it = (*state)->attach_map->stage_to_attach_iter.find(stage_id); - CHECK(it != (*state)->attach_map->stage_to_attach_iter.end()); + ICHECK(it != (*state)->attach_map->stage_to_attach_iter.end()); Array spatial_split_step_ids = GetSpatialSplitStepIds(*state, it->second.first); // Fuse all iterators to do cooperative fetching @@ -897,7 +898,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol do { step_id = split_step_ids[(*rand_gen)() % split_step_ids.size()]; ps = (*state)->transform_steps[step_id].as(); - CHECK(ps != nullptr); + ICHECK(ps != nullptr); extent = GetIntImm(ps->extent.value()); retry_ct += 1; } while (retry_ct < static_cast(split_step_ids.size()) << 2 && (extent == 1 || extent == 0)); @@ -929,7 +930,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol // Divide one factor from lengths[src_idx] and multiply it to lengths[dst_idx] size_t dst_idx = random_perm[(i + 1) % random_perm.size()]; const std::vector& factors = policy->split_memo.GetFactors(length); - CHECK_GE(factors.size(), 1); + ICHECK_GE(factors.size(), 1); int divide_factor; if (dst_idx == lengths.size() - 1) { @@ -961,7 +962,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol } } - CHECK_LE(GetIntImm(new_lengths.back()), max_innermost_split_factor); + ICHECK_LE(GetIntImm(new_lengths.back()), max_innermost_split_factor); StateNode* pstate = state->CopyOnWrite(); pstate->transform_steps.Set( @@ -994,7 +995,7 @@ PopulationGenerationRule::ResultKind MutateAutoUnroll::Apply(SketchPolicyNode* p // Randomly pick up an auto unroll pragma step auto step_id = pragma_steps[(*rand_gen)() % pragma_steps.size()]; auto ps = (*state)->transform_steps[step_id].as(); - CHECK(ps); + ICHECK(ps); // Mutate its value to a random candidates auto val = std::to_string(auto_unroll_configs[(*rand_gen)() % auto_unroll_configs.size()]); @@ -1035,7 +1036,7 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo size_t step_id = compute_at_steps[(*rand_gen)() % compute_at_steps.size()]; auto ps = (*state)->transform_steps[step_id].as(); int stage_inc = GetTargetStageIDInState(*state, step_id) - ps->stage_id; - CHECK(ps != nullptr); + ICHECK(ps != nullptr); // Randomly pick a new computation location std::vector> candidates = @@ -1156,14 +1157,14 @@ PopulationGenerationRule::ResultKind MutateParallel::Apply(SketchPolicyNode* pol if (ps->iter_id == 0) { step = AnnotationStep(ps->stage_id, 0, ps->annotation); } else { - CHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size()); + ICHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size()); step = AnnotationStep(ps->stage_id, ps->iter_id + iter_offset, ps->annotation); } } else if (auto ps = step.as()) { if (ps->iter_id == 0) { step = PragmaStep(ps->stage_id, 0, ps->pragma_type); } else { - CHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size()); + ICHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size()); step = PragmaStep(ps->stage_id, ps->iter_id + iter_offset, ps->pragma_type); } } else { diff --git a/src/auto_scheduler/search_policy/utils.cc b/src/auto_scheduler/search_policy/utils.cc index 9e72eeb3f0c2..3e2f7aaed44f 100644 --- a/src/auto_scheduler/search_policy/utils.cc +++ b/src/auto_scheduler/search_policy/utils.cc @@ -32,7 +32,7 @@ namespace auto_scheduler { Array GetSpatialSplitStepIds(const State& s, int stage_id) { const auto& stage 
= s->stages[stage_id]; const auto& pop = s->stages[stage_id]->op.as(); - CHECK(pop != nullptr); + ICHECK(pop != nullptr); const std::set& no_split_at_inner_name_set = stage->op->attrs.count(SearchPolicyKey::no_split_at_inner) ? GetIterNameSetParam(stage->op->attrs, SearchPolicyKey::no_split_at_inner) @@ -182,7 +182,7 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo for (const auto& iter : state->stages[stage_id]->iters) { if (!no_split_at_inner_name_set.count(iter->name)) { if (iter->iter_kind == IteratorKind::kSpatial) { - CHECK_GE(n_space, 1); + ICHECK_GE(n_space, 1); if (n_space == 1) { space_levels[0].push_back(iter); @@ -194,7 +194,7 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo spatial_split_step_ids->push_back(tmp_s->transform_steps.size() - 1); } } else if (iter->iter_kind == IteratorKind::kReduction) { - CHECK_GE(n_reduce, 1); + ICHECK_GE(n_reduce, 1); if (n_reduce == 1) { reduce_levels[0].push_back(iter); @@ -219,26 +219,26 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo } if (!space_outer.empty()) { - CHECK(!space_levels.empty()); + ICHECK(!space_levels.empty()); space_levels.front().insert(space_levels.front().begin(), std::make_move_iterator(space_outer.begin()), std::make_move_iterator(space_outer.end())); } if (!space_inner.empty()) { - CHECK(!space_levels.empty()); + ICHECK(!space_levels.empty()); space_levels.back().insert(space_levels.back().begin(), std::make_move_iterator(space_inner.begin()), std::make_move_iterator(space_inner.end())); } if (!reduce_outer.empty()) { - CHECK(!reduce_levels.empty()); + ICHECK(!reduce_levels.empty()); reduce_levels.front().insert(reduce_levels.front().begin(), std::make_move_iterator(reduce_outer.begin()), std::make_move_iterator(reduce_outer.end())); } if (!reduce_inner.empty()) { - CHECK(!reduce_levels.empty()); + ICHECK(!reduce_levels.empty()); reduce_levels.back().insert(reduce_levels.back().begin(), std::make_move_iterator(reduce_inner.begin()), std::make_move_iterator(reduce_inner.end())); @@ -274,7 +274,7 @@ State FollowTiling(const State& state, int stage_id, const std::vector& spl Array split_res; auto pop = state->stages[stage_id]->op.as(); - CHECK(pop != nullptr); + ICHECK(pop != nullptr); const Stage& stage = state->stages[stage_id]; const std::set& no_split_at_inner_name_set = stage->op->attrs.count(SearchPolicyKey::no_split_at_inner) @@ -285,8 +285,8 @@ State FollowTiling(const State& state, int stage_id, const std::vector& spl no_split_at_inner_name_in_stage_cnt += no_split_at_inner_name_set.count(iter->name); } - CHECK_EQ(state->stages[stage_id]->iters.size() - no_split_at_inner_name_in_stage_cnt, - split_step_ids.size()); + ICHECK_EQ(state->stages[stage_id]->iters.size() - no_split_at_inner_name_in_stage_cnt, + split_step_ids.size()); State tmp_s = state; int ct = 0; @@ -328,7 +328,7 @@ State FollowTiling(const State& state, int stage_id, const std::vector& spl } else if (n_split == 2) { space_2.push_back(iter); } else { - CHECK_EQ(n_split, 3); + ICHECK_EQ(n_split, 3); space_3.push_back(iter); } } diff --git a/src/auto_scheduler/search_policy/utils.h b/src/auto_scheduler/search_policy/utils.h index 5c015ca46a9b..f0c4cbca9ca0 100644 --- a/src/auto_scheduler/search_policy/utils.h +++ b/src/auto_scheduler/search_policy/utils.h @@ -99,29 +99,29 @@ inline int OperationToStage(const te::Operation& op, const State& state) { /*! \brief Get an integer from a tvm str Map. 
*/
inline int GetIntParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto pint = attr_dict[key].as<IntImmNode>();
-  CHECK(pint != nullptr);
+  ICHECK(pint != nullptr);
   return pint->value;
 }
 
 /*! \brief Get a double from a tvm str Map. */
 inline double GetDoubleParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto pdouble = attr_dict[key].as<FloatImmNode>();
-  CHECK(pdouble != nullptr);
+  ICHECK(pdouble != nullptr);
   return pdouble->value;
 }
 
 /*! \brief Get a string from a tvm str Map. */
 inline std::string GetStringParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   const auto& target = attr_dict[key];
   if (auto pstr = target.as<StringImmNode>()) {
     return pstr->value;
   }
   auto pstr = target.as<StringObj>();
-  CHECK(pstr != nullptr);
+  ICHECK(pstr != nullptr);
   return pstr->data;
 }
@@ -129,9 +129,9 @@ inline std::string GetStringParam(const Map<String, ObjectRef>& attr_dict, const
 inline std::set<std::string> GetIterNameSetParam(const Map<String, ObjectRef>& attr_dict,
                                                  const std::string& key) {
   std::set<std::string> ret;
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto names = attr_dict[key].as<ArrayNode>();
-  CHECK(names != nullptr);
+  ICHECK(names != nullptr);
   for (const auto& name : *names) {
     ret.insert(name.as<StringObj>()->data);
   }
@@ -477,7 +477,7 @@ inline bool HasCrossThreadReduction(const State& state, int stage_id) {
 /*! \brief Return whether the stage has been tiled already.
*/ inline bool IsTiled(const Stage& stage) { auto op = stage->op.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); return stage->iters.size() != op->axis.size() + op->reduce_axis.size(); } @@ -502,7 +502,7 @@ inline void ExtractOriginalIterators(const std::string& name, std::setop.as(); - CHECK(pop != nullptr); + ICHECK(pop != nullptr); std::set original_names; const std::set& no_split_at_inner_name_set = @@ -583,7 +583,7 @@ inline State FuseAllReductionIterators(const State& state, int stage_id, Iterato } } - CHECK(!reduce_iters->empty()); + ICHECK(!reduce_iters->empty()); State tmp_s = state; if (reduce_iters->size() > 1) { *fused_iter = tmp_s.fuse(stage_id, *reduce_iters); @@ -609,7 +609,7 @@ inline State FuseAllOuterSpaceIterators(const State& state, int stage_id, Iterat to_fuse.push_back(it); } - CHECK(!to_fuse.empty()); + ICHECK(!to_fuse.empty()); State tmp_s = state; if (to_fuse.size() > 1) { *fused_iter = tmp_s.fuse(stage_id, to_fuse); @@ -649,7 +649,7 @@ inline int RandomChoose(const std::vector& prefix_sum_probs, std::mt1993 std::uniform_real_distribution<> dis(0.0, 1.0); double x = dis(*random_gen); - CHECK(!prefix_sum_probs.empty()); + ICHECK(!prefix_sum_probs.empty()); return std::lower_bound(prefix_sum_probs.begin(), prefix_sum_probs.end(), x) - prefix_sum_probs.begin(); diff --git a/src/auto_scheduler/search_task.cc b/src/auto_scheduler/search_task.cc index e3f35e9f0c19..0b85a03f0671 100755 --- a/src/auto_scheduler/search_task.cc +++ b/src/auto_scheduler/search_task.cc @@ -53,7 +53,7 @@ HardwareParams HardwareParamsNode::GetDefaultHardwareParams(const Target& target auto ctx = TVMContext{kDLGPU, 0}; auto func = tvm::runtime::Registry::Get("device_api.gpu"); - CHECK(func != nullptr) << "Cannot find GPU device_api in registry"; + ICHECK(func != nullptr) << "Cannot find GPU device_api in registry"; auto device_api = static_cast(((*func)()).operator void*()); tvm::runtime::TVMRetValue ret; diff --git a/src/auto_scheduler/transform_step.cc b/src/auto_scheduler/transform_step.cc index 73f673421378..852f1e1f17d8 100755 --- a/src/auto_scheduler/transform_step.cc +++ b/src/auto_scheduler/transform_step.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -43,7 +44,7 @@ struct Handler<::tvm::Array<::tvm::Integer>> { inline static void Write(dmlc::JSONWriter* writer, const ::tvm::Array<::tvm::Integer>& array) { writer->BeginArray(false); for (const auto& i : array) { - CHECK(i.defined()); + ICHECK(i.defined()); writer->WriteArrayItem(i->value); } writer->EndArray(); @@ -65,7 +66,7 @@ struct Handler<::tvm::Array<::tvm::Optional<::tvm::Integer>>> { const ::tvm::Array<::tvm::Optional<::tvm::Integer>>& array) { writer->BeginArray(false); for (const auto& i : array) { - CHECK(i); + ICHECK(i); writer->WriteArrayItem(i.value()->value); } writer->EndArray(); @@ -125,7 +126,7 @@ Step StepReadFromRecord(dmlc::JSONReader* reader) { std::string name; bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&name); if (name == AnnotationStepNode::record_prefix_str) { return AnnotationStep(reader); @@ -283,13 +284,13 @@ AnnotationStep::AnnotationStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); int int_val; reader->Read(&int_val); node->annotation = IteratorAnnotation(int_val); @@ -308,7 +309,7 @@ Iterator 
AnnotationStepNode::ApplyToState(State* state) const {
   const Stage& stage = (*state)->stages[stage_id];
   Iterator it = stage->iters[iter_id];
 
-  CHECK(it->annotation == IteratorAnnotation::kNone);
+  ICHECK(it->annotation == IteratorAnnotation::kNone);
   Iterator new_it = Iterator(it->name, it->range, it->iter_kind, annotation, &it->orig_iters);
   Stage new_stage = stage;
   new_stage.CopyOnWrite()->iters.Set(iter_id, new_it);
@@ -410,7 +411,7 @@ FuseStep::FuseStep(int stage_id, const Array<Integer>& fused_ids) {
   auto node = make_object<FuseStepNode>();
   node->stage_id = stage_id;
   for (const auto& x : fused_ids) {
-    CHECK(x->IsInstance<IntImmNode>());
+    ICHECK(x->IsInstance<IntImmNode>());
   }
   node->fused_ids = fused_ids;
   data_ = std::move(node);
@@ -420,10 +421,10 @@ FuseStep::FuseStep(dmlc::JSONReader* reader) {
   auto node = make_object<FuseStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->fused_ids);
   data_ = std::move(node);
 }
@@ -446,7 +447,7 @@ Iterator FuseStepNode::ApplyToState(State* state) const {
 
   for (size_t i = 0; i < fused_ids.size(); ++i) {
     if (i > 0) {
-      CHECK_EQ(fused_ids[i]->value, fused_ids[i - 1]->value + 1);
+      ICHECK_EQ(fused_ids[i]->value, fused_ids[i - 1]->value + 1);
     }
 
     if (i != fused_ids.size() - 1) {
@@ -574,13 +575,13 @@ PragmaStep::PragmaStep(dmlc::JSONReader* reader) {
   auto node = make_object<PragmaStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   std::string string_value;
   reader->Read(&string_value);
   node->pragma_type = std::move(string_value);
@@ -609,7 +610,7 @@ void PragmaStepNode::ApplyToState(State* state) const {
         break;
       }
     }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
     stage.CopyOnWrite()->attrs.auto_unroll_max_step = atoi(pragma_type.c_str() + pos + 1);
     pstate->stages.Set(stage_id, std::move(stage));
   } else {
@@ -628,7 +629,7 @@ void PragmaStepNode::ApplyToSchedule(Array<te::Stage>* stages,
        break;
      }
    }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
    int value = atoi(pragma_type.c_str() + pos + 1);
    stage.pragma(axes[iter_id], "auto_unroll_max_step", value);
    stage.pragma(axes[iter_id], "unroll_explicit", true);
@@ -651,7 +652,7 @@ String PragmaStepNode::PrintAsPythonAPI(Array<te::Stage>* stages,
        break;
      }
    }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
    int value = atoi(pragma_type.c_str() + pos + 1);
    ss << "s[" << op_name << "].pragma("
       << CleanName((*stage_to_axes)[stage][iter_id]->var->name_hint, op_name)
@@ -674,7 +675,7 @@ ReorderStep::ReorderStep(int stage_id, const Array<Integer>& after_ids) {
   auto node = make_object<ReorderStepNode>();
   node->stage_id = stage_id;
   for (const auto& x : after_ids) {
-    CHECK(x->IsInstance<IntImmNode>());
+    ICHECK(x->IsInstance<IntImmNode>());
   }
   node->after_ids = after_ids;
   data_ = std::move(node);
@@ -684,10 +685,10 @@ ReorderStep::ReorderStep(dmlc::JSONReader* reader) {
   auto node = make_object<ReorderStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->after_ids);
   data_ = std::move(node);
 }
@@ -713,7 +714,7 @@ void ReorderStepNode::ApplyToSchedule(Array<te::Stage>* stages,
                                       StageToAxesMap* stage_to_axes)
const { auto stage = (*stages)[stage_id]; const Array& axes = stage_to_axes->at(stage); - CHECK_EQ(after_ids.size(), axes.size()); + ICHECK_EQ(after_ids.size(), axes.size()); Array new_axes; new_axes.reserve(axes.size()); @@ -879,7 +880,7 @@ String PrintSplitAsPythonAPI(Array* stages, StageToAxesMap* stage_to_ const auto& func_name = CleanName(stage->op->name); const auto& outs = ApplySplitToSchedule(stages, stage_to_axes, stage_id, iter_id, lengths, inner_to_outer); - CHECK_EQ(outs.size(), lengths.size() + 1); + ICHECK_EQ(outs.size(), lengths.size() + 1); std::stringstream ss; int size = static_cast(lengths.size()); @@ -921,23 +922,23 @@ SplitStep::SplitStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); int int_val; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&int_val); if (int_val) { node->extent = Integer(int_val); } s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->lengths); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->inner_to_outer); data_ = std::move(node); } @@ -988,14 +989,14 @@ void FollowSplitStepNode::WriteToRecord(dmlc::JSONWriter* writer) const { Array> FollowSplitStepNode::ExtractSplitLengths( const Array& transform_steps) const { // Make sure src_step_id is within the range of transform_steps. - CHECK_LT(src_step_id, transform_steps.size()); + ICHECK_LT(src_step_id, transform_steps.size()); auto ps = transform_steps[src_step_id].as(); - CHECK(ps != nullptr); + ICHECK(ps != nullptr); // Make sure the size of ps->lengths is not smaller than n_split-1. // Note that the number of actual splitting factors of src_step is ps->lengths.size()+1. - CHECK_LE(n_split, ps->lengths.size() + 1); - CHECK(ps != nullptr); + ICHECK_LE(n_split, ps->lengths.size() + 1); + ICHECK(ps != nullptr); Array> lengths; lengths.reserve(n_split); @@ -1029,16 +1030,16 @@ FollowSplitStep::FollowSplitStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->src_step_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->n_split); data_ = std::move(node); } @@ -1079,19 +1080,19 @@ FollowFusedSplitStep::FollowFusedSplitStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->src_step_ids); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->level); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->factor_or_nparts); data_ = std::move(node); } @@ -1112,9 +1113,9 @@ Optional FollowFusedSplitStepNode::ExtractSplitLength( for (int src_step_id : src_step_ids) { // Make sure the src_step_id is within the range of transform_steps. 
- CHECK_LT(src_step_id, transform_steps.size()); + ICHECK_LT(src_step_id, transform_steps.size()); auto ps = transform_steps[src_step_id].as(); - CHECK(ps != nullptr); + ICHECK(ps != nullptr); // Multiple the splitting factor on corresponding splitting level of src_steps. if (ps->lengths[level] && ret.defined()) { ret *= ps->lengths[level].value(); @@ -1158,16 +1159,16 @@ StorageAlignStep::StorageAlignStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->factor); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->offset); data_ = std::move(node); } @@ -1224,13 +1225,13 @@ ComputeAtStep::ComputeAtStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->target_stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->target_iter_id); data_ = std::move(node); } @@ -1295,7 +1296,7 @@ ComputeInlineStep::ComputeInlineStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); data_ = std::move(node); } @@ -1311,7 +1312,7 @@ void ComputeInlineStepNode::ApplyToState(State* state) const { // Check the validity of compute_inline for (size_t i = 0; i < stage->iters.size(); ++i) { - CHECK_EQ((*state)->attach_map->iter_to_attached_stages.count(std::make_pair(stage_id, i)), 0) + ICHECK_EQ((*state)->attach_map->iter_to_attached_stages.count(std::make_pair(stage_id, i)), 0) << "Invalid compute_inline: There are some other stages that are attached to the " << "target stage"; } @@ -1351,7 +1352,7 @@ ComputeRootStep::ComputeRootStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); data_ = std::move(node); } @@ -1418,10 +1419,10 @@ Array GetFormerStageModifiableSteps(Step current_step, const Array& } } // add SplitStepNode required by rfactor - CHECK_GE(i, 1); - CHECK(transform_steps[i - 1]->IsInstance()); + ICHECK_GE(i, 1); + ICHECK(transform_steps[i - 1]->IsInstance()); const Step& split_step = transform_steps[i - 1]; - CHECK_EQ(split_step->stage_id, step->stage_id); + ICHECK_EQ(split_step->stage_id, step->stage_id); ret_steps.push_back(split_step); // add RfactorStepNode ret_steps.push_back(step); @@ -1449,15 +1450,15 @@ CacheReadStep::CacheReadStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); std::string string_value; reader->Read(&string_value); node->scope_name = std::move(string_value); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->reader_stage_ids); data_ = std::move(node); } @@ -1560,10 +1561,10 @@ CacheWriteStep::CacheWriteStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); std::string string_value; reader->Read(&string_value); node->scope_name = std::move(string_value); @@ -1587,7 +1588,7 @@ int 
CacheWriteStepNode::ApplyToState(State* state, const ComputeDAG& dag) const GetFormerStageModifiableSteps(GetRef(this), (*state)->transform_steps)); int added_ops = current_compute_dag->ops.size() - last_dag_op_size; // TODO(jcf94): Update this check to equal after fixing the cache write bug in TVM - CHECK_GE(added_ops, 1); + ICHECK_GE(added_ops, 1); // target_stage -> cache_write_stage + target_stage // Assume no step has been applied to the target stage before cache write. @@ -1691,13 +1692,13 @@ RfactorStep::RfactorStep(dmlc::JSONReader* reader) { auto node = make_object(); bool s; s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->stage_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->iter_id); s = reader->NextArrayItem(); - CHECK(s); + ICHECK(s); reader->Read(&node->factor_iter_id); data_ = std::move(node); } diff --git a/src/auto_scheduler/utils.h b/src/auto_scheduler/utils.h index 610fec96617a..88c649c6f919 100755 --- a/src/auto_scheduler/utils.h +++ b/src/auto_scheduler/utils.h @@ -150,8 +150,8 @@ inline bool IntArrayEqual(const Array& arr1, const Array& ar for (size_t i = 0; i < arr1.size(); ++i) { auto int1 = arr1[i].as(); auto int2 = arr2[i].as(); - CHECK(int1 != nullptr); - CHECK(int2 != nullptr); + ICHECK(int1 != nullptr); + ICHECK(int2 != nullptr); if (int1->value != int2->value) { return false; } @@ -169,7 +169,7 @@ inline double FloatArrayMean(const Array& float_array) { for (const auto& x : float_array) { auto floatimm = x.as(); - CHECK(floatimm != nullptr); + ICHECK(floatimm != nullptr); sum += floatimm->value; } return sum / float_array.size(); @@ -191,7 +191,7 @@ inline bool StrEndsWith(const String& a, const String& b) { /*! \brief Get an int value from an Expr */ inline int64_t GetIntImm(const PrimExpr& expr) { auto pint = expr.as(); - CHECK(pint != nullptr); + ICHECK(pint != nullptr); return pint->value; } diff --git a/src/autotvm/feature_visitor.cc b/src/autotvm/feature_visitor.cc index 54fc2522db66..15e09755cee2 100644 --- a/src/autotvm/feature_visitor.cc +++ b/src/autotvm/feature_visitor.cc @@ -60,7 +60,7 @@ void FeatureVisitor::VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) { Var var = op->node.as()->var; const auto* extent = op->value.as(); - CHECK(extent); + ICHECK(extent); std::string name = var.get()->name_hint; AnnotationType ann = kParallel; diff --git a/src/autotvm/touch_extractor.cc b/src/autotvm/touch_extractor.cc index 91e2ee135b16..10ead718bae2 100644 --- a/src/autotvm/touch_extractor.cc +++ b/src/autotvm/touch_extractor.cc @@ -120,13 +120,13 @@ void TouchExtractor::ExitItervar_() { if (kv.second.stride != 0) { // multiply count for (auto stack_var : itervar_stack_) { auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first); - CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); + ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); touch_pattern->second.count *= itervar_map[var].length; } } else { // multiply reuse ratio for (auto stack_var : itervar_stack_) { auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first); - CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); + ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); touch_pattern->second.reuse *= itervar_map[var].length; } } @@ -151,7 +151,7 @@ void TouchExtractor::ExitItervar_() { for (auto stack_var : itervar_stack_) { if (ParallelLevel(itervar_map[stack_var].ann) == 
para_level + 1) { auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first); - CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); + ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end()); touch_pattern->second.thread_reuse = -kv.second.reuse; touch_pattern->second.thread_count = -kv.second.count; // NOTE: use minus as a flag to denote it is a base, diff --git a/src/contrib/hybrid/codegen_hybrid.cc b/src/contrib/hybrid/codegen_hybrid.cc index 67765f039714..7522f20523c8 100644 --- a/src/contrib/hybrid/codegen_hybrid.cc +++ b/src/contrib/hybrid/codegen_hybrid.cc @@ -65,14 +65,14 @@ std::string CodeGenHybrid::Finish() { return stream.str(); } void CodeGenHybrid::PrintType(DataType t, std::ostream& os) { if (t.is_float()) { os << "float"; - CHECK(t.bits() == 16 || t.bits() == 32 || t.bits() == 64); + ICHECK(t.bits() == 16 || t.bits() == 32 || t.bits() == 64); } else if (t.is_int()) { os << "int"; - CHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64); + ICHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64); } else { - CHECK(t.is_uint()) << "Unsupported type " << t; + ICHECK(t.is_uint()) << "Unsupported type " << t; os << "uint"; - CHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64); + ICHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64); } os << t.bits(); } @@ -93,7 +93,7 @@ template inline void PrintBinaryExpr(const T* op, const char* opstr, std::ostream& os, // NOLINT(*) CodeGenHybrid* p) { - CHECK(op->dtype.lanes() == 1) << "vec bin op not implemented"; + ICHECK(op->dtype.lanes() == 1) << "vec bin op not implemented"; if (isalpha(opstr[0])) { os << opstr << '('; p->PrintExpr(op->a, os); @@ -114,8 +114,8 @@ inline void PrintBinaryExpr(const T* op, const char* opstr, inline void PrintBinaryIntrinsitc(const CallNode* op, const char* opstr, std::ostream& os, // NOLINT(*) CodeGenHybrid* p) { - CHECK(op->dtype.lanes() == 1) << "vec bin intrin not implemented"; - CHECK_EQ(op->args.size(), 2U); + ICHECK(op->dtype.lanes() == 1) << "vec bin intrin not implemented"; + ICHECK_EQ(op->args.size(), 2U); os << '('; p->PrintExpr(op->args[0], os); os << opstr; @@ -228,7 +228,7 @@ void CodeGenHybrid::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLIN } else if (op->op.same_as(builtin::shift_right())) { PrintBinaryIntrinsitc(op, ">>", os, this); } else if (op->op.same_as(builtin::bitwise_not())) { - CHECK_EQ(op->args.size(), 1U); + ICHECK_EQ(op->args.size(), 1U); os << "(~"; PrintExpr(op->args[0], os); os << ')'; @@ -251,9 +251,9 @@ void CodeGenHybrid::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLIN os << ")"; } else { auto* ptr_op = op->op.as(); - CHECK(ptr_op != nullptr); + ICHECK(ptr_op != nullptr); std::string name = ptr_op->name; - CHECK_EQ(name.compare(0, 4, "tir."), 0); + ICHECK_EQ(name.compare(0, 4, "tir."), 0); os << name.substr(4) << "("; for (size_t i = 0; i < op->args.size(); i++) { PrintExpr(op->args[i], os); @@ -305,7 +305,7 @@ void CodeGenHybrid::VisitStmt_(const LetStmtNode* op) { void CodeGenHybrid::VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == tir::attr::thread_extent) { auto iter_var = op->node.as(); - CHECK(iter_var); + ICHECK(iter_var); binds_[iter_var->var.get()] = dot_to_underscore(iter_var->var->name_hint); PrintIndent(); stream << "for " << binds_[iter_var->var.get()] << " in bind('" << iter_var->var->name_hint @@ -327,7 +327,7 @@ void CodeGenHybrid::VisitStmt_(const AttrStmtNode* op) { void CodeGenHybrid::VisitStmt_(const 
ProducerRealizeNode* op) { auto tensor = Downcast(op->producer); - CHECK(alloc_storage_scope_.count(tensor->op)); + ICHECK(alloc_storage_scope_.count(tensor->op)); if (!alloc_storage_scope_[tensor->op].empty()) { PrintIndent(); stream << GetTensorID(tensor) << " = allocate(("; @@ -493,7 +493,7 @@ void CodeGenHybrid::DumpStmt(const Stmt& stmt, const Array& inputs, stream << GetTensorID(GetRef(tensor)); } else { auto var = inputs[i].as(); - CHECK(var) << "Input should either be a tensor or a variable!"; + ICHECK(var) << "Input should either be a tensor or a variable!"; stream << GetVarID(var); } } diff --git a/src/contrib/tf_op/tvm_dso_op_kernels.cc b/src/contrib/tf_op/tvm_dso_op_kernels.cc index 705a3347b68c..5c119b64b93d 100644 --- a/src/contrib/tf_op/tvm_dso_op_kernels.cc +++ b/src/contrib/tf_op/tvm_dso_op_kernels.cc @@ -241,7 +241,7 @@ class TVMDSOOp : public OpKernel { // Load TVM function from dynamic library tvm::runtime::Module mod_dylib = tvm::runtime::Module::LoadFromFile(lib_path); tvm_func = mod_dylib.GetFunction(func_name); - CHECK(tvm_func != nullptr); + ICHECK(tvm_func != nullptr); } void Compute(tensorflow::OpKernelContext* context) override { diff --git a/src/driver/driver_api.cc b/src/driver/driver_api.cc index 2e41f0bee921..f88b6215f927 100644 --- a/src/driver/driver_api.cc +++ b/src/driver/driver_api.cc @@ -215,7 +215,7 @@ std::pair SplitDevHostFuncs(IRModule mod_mixed, const Target tir::transform::CombineContextCall(), }; auto opt_host = transform::Sequential(host_pass_list); - CHECK(mod_mixed.defined()) << "This module must be defined"; + ICHECK(mod_mixed.defined()) << "This module must be defined"; auto mhost = opt_host(mod_mixed); // device pipeline @@ -243,9 +243,9 @@ std::pair SplitDevHostFuncs(IRModule mod_mixed, const Target } if (target->kind->device_type == kDLCPU && target_host == target) { - CHECK(mdevice->functions.empty()) << "No device code should be generated when target " - << "and host_target are both llvm target." - << "\n"; + ICHECK(mdevice->functions.empty()) << "No device code should be generated when target " + << "and host_target are both llvm target." + << "\n"; } return {mhost, mdevice}; @@ -272,7 +272,7 @@ runtime::Module build(const Map& inputs, const Target& target_ IRModule mhost_all = IRModule(Map()); - CHECK(mhost_all.defined()) << "The host module must be defined"; + ICHECK(mhost_all.defined()) << "The host module must be defined"; for (const auto& it : inputs) { if (it.second.defined()) { @@ -280,9 +280,9 @@ runtime::Module build(const Map& inputs, const Target& target_ auto& mhost = pair.first; auto& mdevice = pair.second; - CHECK(mhost.defined()) << "The split host module must be defined"; + ICHECK(mhost.defined()) << "The split host module must be defined"; - CHECK(mhost_all.defined()) << "The host module must be defined"; + ICHECK(mhost_all.defined()) << "The host module must be defined"; mhost_all->Update(mhost); diff --git a/src/ir/diagnostic.cc b/src/ir/diagnostic.cc index 148831dc3ab6..f9299e3e27e8 100644 --- a/src/ir/diagnostic.cc +++ b/src/ir/diagnostic.cc @@ -225,7 +225,7 @@ void ReportAt(const DiagnosticContext& context, std::ostream& out, const Span& s return; } - CHECK(context->module->source_map.defined()); + ICHECK(context->module->source_map.defined()); auto it = context->module->source_map->source_map.find(span->source_name); // If the source name is not in the current source map, sources were not annotated. 
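Every hunk in this patch is the same mechanical rewrite: CHECK* becomes ICHECK* while the asserted condition and the streamed message stay untouched, so each failure is flagged as an internal invariant violation rather than a user-facing error. For readers unfamiliar with the assert-and-stream idiom these macros rely on, here is a minimal standalone sketch; it is illustrative only, the names FatalMessageLogger and ICHECK_DEMO are hypothetical, and TVM's real macros carry their own logging plumbing in addition.

// Minimal sketch of an assert-and-stream macro in the CHECK/ICHECK style.
// Illustrative only; FatalMessageLogger and ICHECK_DEMO are hypothetical
// names, not TVM's actual implementation.
#include <cstdlib>
#include <iostream>
#include <sstream>

class FatalMessageLogger {
 public:
  FatalMessageLogger(const char* file, int line) {
    stream_ << file << ":" << line << ": InternalError: ";
  }
  // Runs at the end of the full expression, after all `<<` operands at the
  // call site have been appended, so the complete message gets printed.
  ~FatalMessageLogger() {
    std::cerr << stream_.str() << std::endl;
    std::abort();
  }
  std::ostringstream& stream() { return stream_; }

 private:
  std::ostringstream stream_;
};

// When `cond` is false, a temporary logger collects "Check failed: <cond>"
// plus whatever the call site streams in, then aborts in its destructor.
#define ICHECK_DEMO(cond) \
  if (!(cond))            \
  FatalMessageLogger(__FILE__, __LINE__).stream() << "Check failed: " #cond " "

int main() {
  int num_args = 4;
  ICHECK_DEMO(num_args % 2 == 0) << "expected key/value pairs";     // passes
  ICHECK_DEMO(num_args > 10) << "too few arguments: " << num_args;  // aborts
  return 0;  // never reached
}

The conversion in the hunks below is therefore purely textual: each assertion keeps its condition and message, and only the macro family, and with it the way a failure is reported, changes.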
diff --git a/src/ir/env_func.cc b/src/ir/env_func.cc
index 7b0d6e6f09c2..6e1f847d3fdd 100644
--- a/src/ir/env_func.cc
+++ b/src/ir/env_func.cc
@@ -38,7 +38,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
 
 ObjectPtr<Object> CreateEnvNode(const std::string& name) {
   auto* f = runtime::Registry::Get(name);
-  CHECK(f != nullptr) << "Cannot find global function \'" << name << '\'';
+  ICHECK(f != nullptr) << "Cannot find global function \'" << name << '\'';
   ObjectPtr<EnvFuncNode> n = make_object<EnvFuncNode>();
   n->func = *f;
   n->name = name;
@@ -51,7 +51,7 @@ TVM_REGISTER_GLOBAL("ir.EnvFuncGet").set_body_typed(EnvFunc::Get);
 
 TVM_REGISTER_GLOBAL("ir.EnvFuncCall").set_body([](TVMArgs args, TVMRetValue* rv) {
   EnvFunc env = args[0];
-  CHECK_GE(args.size(), 1);
+  ICHECK_GE(args.size(), 1);
   env->func.CallPacked(TVMArgs(args.values + 1, args.type_codes + 1, args.size() - 1), rv);
 });
 
diff --git a/src/ir/error.cc b/src/ir/error.cc
index 5cd7a247d025..5d3978dda4ff 100644
--- a/src/ir/error.cc
+++ b/src/ir/error.cc
@@ -46,7 +46,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
   // First we pick an error reporting strategy for each error.
   // TODO(@jroesch): Spanned errors are currently not supported.
   for (auto err : this->errors_) {
-    CHECK(!err.span.defined()) << "attempting to use spanned errors, currently not supported";
+    ICHECK(!err.span.defined()) << "attempting to use spanned errors, currently not supported";
   }
 
   NodeMap<GlobalVar, NodeMap<ObjectRef, std::string>> error_maps;
@@ -62,7 +62,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
 
     auto has_errs = this->node_to_error_.find(node);
 
-    CHECK(has_errs != this->node_to_error_.end());
+    ICHECK(has_errs != this->node_to_error_.end());
 
     const auto& error_indicies = has_errs->second;
 
@@ -113,7 +113,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
     annotated_prog << AsText(func, false, [&err_map](const ObjectRef& expr) {
       auto it = err_map.find(expr);
       if (it != err_map.end()) {
-        CHECK_NE(it->second.size(), 0);
+        ICHECK_NE(it->second.size(), 0);
         return it->second;
       } else {
         return std::string("");
diff --git a/src/ir/expr.cc b/src/ir/expr.cc
index 05d41cf204d6..67e5cea93011 100644
--- a/src/ir/expr.cc
+++ b/src/ir/expr.cc
@@ -49,17 +49,17 @@ PrimExpr PrimExpr::FromObject_(ObjectRef ref) {
   if (auto* ptr = ref.as<runtime::StringObj>()) {
     return tir::StringImm(GetRef<runtime::String>(ptr));
   }
-  CHECK(ObjectTypeChecker<PrimExpr>::Check(ref.get()))
+  ICHECK(ObjectTypeChecker<PrimExpr>::Check(ref.get()))
       << "Expect type " << ObjectTypeChecker<PrimExpr>::TypeName() << " but get "
       << ref->GetTypeKey();
   return Downcast<PrimExpr>(ref);
 }
 
 IntImm::IntImm(DataType dtype, int64_t value) {
-  CHECK(dtype.is_scalar()) << "ValueError: IntImm can only take scalar.";
-  CHECK(dtype.is_int() || dtype.is_uint()) << "ValueError: IntImm supports only int or uint type.";
+  ICHECK(dtype.is_scalar()) << "ValueError: IntImm can only take scalar.";
+  ICHECK(dtype.is_int() || dtype.is_uint()) << "ValueError: IntImm supports only int or uint type.";
   if (dtype.is_uint()) {
-    CHECK_GE(value, 0U);
+    ICHECK_GE(value, 0U);
   }
   ObjectPtr<IntImmNode> node = make_object<IntImmNode>();
   node->dtype = dtype;
@@ -84,7 +84,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     });
 
 FloatImm::FloatImm(DataType dtype, double value) {
-  CHECK_EQ(dtype.lanes(), 1) << "ValueError: FloatImm can only take scalar.";
+  ICHECK_EQ(dtype.lanes(), 1) << "ValueError: FloatImm can only take scalar.";
   ObjectPtr<FloatImmNode> node = make_object<FloatImmNode>();
   node->dtype = dtype;
   node->value = value;
diff --git a/src/ir/module.cc b/src/ir/module.cc
index 231ae68dd4e0..b011f2d2f664 100644
--- a/src/ir/module.cc
+++ b/src/ir/module.cc
@@ -55,14
+55,14 @@ IRModule::IRModule(tvm::Map functions, for (const auto& kv : n->functions) { // set global var map - CHECK(n->global_var_map_.count(kv.first->name_hint) == 0) + ICHECK(n->global_var_map_.count(kv.first->name_hint) == 0) << "Duplicate global function name " << kv.first->name_hint; n->global_var_map_.Set(kv.first->name_hint, kv.first); } for (const auto& kv : n->type_definitions) { // set global typevar map - CHECK(n->global_type_var_map_.count(kv.first->name_hint) == 0) + ICHECK(n->global_type_var_map_.count(kv.first->name_hint) == 0) << "Duplicate global type definition name " << kv.first->name_hint; n->global_type_var_map_.Set(kv.first->name_hint, kv.first); n->RegisterConstructors(kv.first, kv.second); @@ -150,9 +150,9 @@ tvm::Array IRModuleNode::GetGlobalVars() const { } GlobalTypeVar IRModuleNode::GetGlobalTypeVar(const String& name) const { - CHECK(global_type_var_map_.defined()); + ICHECK(global_type_var_map_.defined()); auto it = global_type_var_map_.find(name); - CHECK(it != global_type_var_map_.end()) + ICHECK(it != global_type_var_map_.end()) << "Cannot find global type var " << name << " in the Module"; return (*it).second; } @@ -183,9 +183,9 @@ void WarnIfMalformed(const IRModule& mod, relay::Function func) { auto fv = relay::FreeVars(func); auto ftv = relay::FreeTypeVars(func, mod); // TODO(@jroesch): refactor to use diagnostic context - CHECK_EQ(fv.size(), 0) << "There are free variables: " << fv << std::endl; - CHECK_EQ(ftv.size(), 0) << "There are free type variables: " << fv - << " in function: " << AsText(func, false); + ICHECK_EQ(fv.size(), 0) << "There are free variables: " << fv << std::endl; + ICHECK_EQ(ftv.size(), 0) << "There are free type variables: " << fv + << " in function: " << AsText(func, false); } void IRModuleNode::Add(const GlobalVar& var, const BaseFunc& f, bool update) { @@ -202,9 +202,9 @@ void IRModuleNode::AddUnchecked(const GlobalVar& var, const BaseFunc& func) { auto it = global_var_map_.find(var->name_hint); if (it != global_var_map_.end()) { - CHECK_EQ((*it).second, var); + ICHECK_EQ((*it).second, var); } else { - CHECK(global_var_map_.count(var->name_hint) == 0) + ICHECK(global_var_map_.count(var->name_hint) == 0) << "Duplicate global function name " << var->name_hint; } @@ -234,7 +234,7 @@ void IRModuleNode::AddTypeDefUnchecked(const GlobalTypeVar& var, const TypeData& this->type_definitions.Set(var, type); if (!update) { // set global type var map - CHECK(global_type_var_map_.count(var->name_hint) == 0) + ICHECK(global_type_var_map_.count(var->name_hint) == 0) << "Duplicate global type definition name " << var->name_hint; } global_type_var_map_.Set(var->name_hint, var); @@ -258,7 +258,7 @@ void IRModuleNode::Remove(const GlobalVar& var) { BaseFunc IRModuleNode::Lookup(const GlobalVar& var) const { auto it = functions.find(var); - CHECK(it != functions.end()) << "There is no definition of " << var->name_hint; + ICHECK(it != functions.end()) << "There is no definition of " << var->name_hint; return (*it).second; } @@ -269,7 +269,7 @@ BaseFunc IRModuleNode::Lookup(const String& name) const { TypeData IRModuleNode::LookupTypeDef(const GlobalTypeVar& var) const { auto it = type_definitions.find(var); - CHECK(it != type_definitions.end()) << "There is no definition of " << var->name_hint; + ICHECK(it != type_definitions.end()) << "There is no definition of " << var->name_hint; return (*it).second; } @@ -280,7 +280,7 @@ TypeData IRModuleNode::LookupTypeDef(const String& name) const { Constructor IRModuleNode::LookupTag(const int32_t tag) { 
auto it = constructor_tag_map_.find(tag); - CHECK(it != constructor_tag_map_.end()) << "There is no constructor with the tag " << tag; + ICHECK(it != constructor_tag_map_.end()) << "There is no constructor with the tag " << tag; return (*it).second; } @@ -382,7 +382,7 @@ void IRModuleNode::Import(const String& path) { void IRModuleNode::ImportFromStd(const String& path) { auto* f = tvm::runtime::Registry::Get("tvm.relay.std_path"); - CHECK(f != nullptr) << "The Relay std_path is not set, please register tvm.relay.std_path."; + ICHECK(f != nullptr) << "The Relay std_path is not set, please register tvm.relay.std_path."; std::string std_path = (*f)(); this->Import(std_path + "/" + path); } @@ -406,7 +406,7 @@ TVM_REGISTER_GLOBAL("ir.Module_Add").set_body([](TVMArgs args, TVMRetValue* ret) GlobalVar var = args[1]; ObjectRef val = args[2]; bool update = args[3]; - CHECK(val->IsInstance()); + ICHECK(val->IsInstance()); if (val->IsInstance()) { mod->Add(var, Downcast(val), update); diff --git a/src/ir/op.cc b/src/ir/op.cc index 45c31963695c..5d2dc704f5b7 100644 --- a/src/ir/op.cc +++ b/src/ir/op.cc @@ -42,7 +42,7 @@ using OpRegistry = AttrRegistry; // find operator by name const Op& Op::Get(const String& name) { const OpRegEntry* reg = OpRegistry::Global()->Get(name); - CHECK(reg != nullptr) << "AttributeError: Operator " << name << " is not registered"; + ICHECK(reg != nullptr) << "AttributeError: Operator " << name << " is not registered"; return reg->op(); } @@ -130,7 +130,7 @@ struct Op2ObjectPtr : public ObjectRef { ObjectPtr CreateOp(const std::string& name) { // Hack use TVMRetValue as exchange auto op = Op::Get(name); - CHECK(op.defined()) << "Cannot find op \'" << name << '\''; + ICHECK(op.defined()) << "Cannot find op \'" << name << '\''; return Op2ObjectPtr::Get(op); } diff --git a/src/ir/span.cc b/src/ir/span.cc index 667c14e4a7ae..4a26f3a6eb11 100644 --- a/src/ir/span.cc +++ b/src/ir/span.cc @@ -74,9 +74,9 @@ Span::Span(SourceName source_name, int line, int end_line, int column, int end_c } Span Span::Merge(const Span& other) const { - CHECK(this->defined() && other.defined()) << "Span::Merge: both spans must be defined"; + ICHECK(this->defined() && other.defined()) << "Span::Merge: both spans must be defined"; - CHECK((*this)->source_name == other->source_name); + ICHECK((*this)->source_name == other->source_name); return Span((*this)->source_name, std::min((*this)->line, other->line), std::max((*this)->end_line, other->end_line), std::min((*this)->column, other->column), diff --git a/src/ir/transform.cc b/src/ir/transform.cc index ec88482ee3bf..3b774462565e 100644 --- a/src/ir/transform.cc +++ b/src/ir/transform.cc @@ -60,8 +60,8 @@ void PassContext::EnterWithScope() { void PassContext::ExitWithScope() { PassContextThreadLocalEntry* entry = RelayPassContextThreadLocalStore::Get(); - CHECK(!entry->context_stack.empty()); - CHECK(entry->context_stack.top().same_as(*this)); + ICHECK(!entry->context_stack.empty()); + ICHECK(entry->context_stack.top().same_as(*this)); entry->context_stack.pop(); } @@ -77,7 +77,7 @@ PassContext PassContext::Current() { class PassConfigManager { public: void Register(std::string key, uint32_t value_type_index) { - CHECK_EQ(key2vtype_.count(key), 0U); + ICHECK_EQ(key2vtype_.count(key), 0U); ValueTypeInfo info; info.type_index = value_type_index; info.type_key = runtime::Object::TypeIndex2Key(value_type_index); @@ -103,7 +103,7 @@ class PassConfigManager { LOG(FATAL) << os.str(); } const auto& info = it->second; - CHECK(kv.second.defined()) << 
"AttributeError: " << kv.first << " is None"; + ICHECK(kv.second.defined()) << "AttributeError: " << kv.first << " is None"; if (kv.second->IsInstance::ContainerType>()) { ObjectRef converted = reflection->CreateObject(info.type_key, Downcast>(kv.second)); @@ -376,7 +376,7 @@ Pass GetPass(const String& pass_name) { // pass } else if ((f = Registry::Get("relay._transform." + pass_name))) { } - CHECK(f != nullptr) << "Cannot use " << pass_name << "to create the pass"; + ICHECK(f != nullptr) << "Cannot use " << pass_name << "to create the pass"; return (*f)(); } @@ -385,7 +385,7 @@ Pass GetPass(const String& pass_name) { // ordering problem needs to be handled in the future. IRModule SequentialNode::operator()(IRModule mod, const PassContext& pass_ctx) const { for (const Pass& pass : passes) { - CHECK(pass.defined()) << "Found undefined pass for optimization."; + ICHECK(pass.defined()) << "Found undefined pass for optimization."; const PassInfo& pass_info = pass->Info(); if (!PassEnabled(pass_info)) continue; // resolve dependencies diff --git a/src/node/attr_registry.h b/src/node/attr_registry.h index 01d2b68c471b..f84be1467453 100644 --- a/src/node/attr_registry.h +++ b/src/node/attr_registry.h @@ -109,10 +109,10 @@ class AttrRegistry { op_map->data_.resize(index + 1, std::make_pair(TVMRetValue(), 0)); } std::pair& p = op_map->data_[index]; - CHECK(p.second != plevel) << "Attribute " << attr_name << " of " << key->AttrRegistryName() - << " is already registered with same plevel=" << plevel; - CHECK(value.type_code() != kTVMNullptr) << "Registered packed_func is Null for " << attr_name - << " of operator " << key->AttrRegistryName(); + ICHECK(p.second != plevel) << "Attribute " << attr_name << " of " << key->AttrRegistryName() + << " is already registered with same plevel=" << plevel; + ICHECK(value.type_code() != kTVMNullptr) << "Registered packed_func is Null for " << attr_name + << " of operator " << key->AttrRegistryName(); if (p.second < plevel && value.type_code() != kTVMNullptr) { op_map->data_[index] = std::make_pair(value, plevel); } diff --git a/src/node/container.cc b/src/node/container.cc index 60b5f40b98f1..b72d5a4cd736 100644 --- a/src/node/container.cc +++ b/src/node/container.cc @@ -96,8 +96,8 @@ struct NDArrayContainerTrait { static constexpr const std::nullptr_t VisitAttrs = nullptr; static void SHashReduce(const runtime::NDArray::Container* key, SHashReducer hash_reduce) { - CHECK_EQ(key->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; - CHECK(runtime::IsContiguous(key->dl_tensor)) << "Can only hash contiguous tensor"; + ICHECK_EQ(key->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; + ICHECK(runtime::IsContiguous(key->dl_tensor)) << "Can only hash contiguous tensor"; hash_reduce(runtime::DataType(key->dl_tensor.dtype)); hash_reduce(key->dl_tensor.ndim); for (int i = 0; i < key->dl_tensor.ndim; ++i) { @@ -113,10 +113,10 @@ struct NDArrayContainerTrait { auto ldt = lhs->dl_tensor.dtype; auto rdt = rhs->dl_tensor.dtype; - CHECK_EQ(lhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; - CHECK_EQ(rhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; - CHECK(runtime::IsContiguous(lhs->dl_tensor)) << "Can only compare contiguous tensor"; - CHECK(runtime::IsContiguous(rhs->dl_tensor)) << "Can only compare contiguous tensor"; + ICHECK_EQ(lhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; + ICHECK_EQ(rhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor"; + 
ICHECK(runtime::IsContiguous(lhs->dl_tensor)) << "Can only compare contiguous tensor"; + ICHECK(runtime::IsContiguous(rhs->dl_tensor)) << "Can only compare contiguous tensor"; if (lhs->dl_tensor.ndim != rhs->dl_tensor.ndim) return false; for (int i = 0; i < lhs->dl_tensor.ndim; ++i) { @@ -172,18 +172,18 @@ TVM_REGISTER_GLOBAL("node.Array").set_body([](TVMArgs args, TVMRetValue* ret) { TVM_REGISTER_GLOBAL("node.ArrayGetItem").set_body([](TVMArgs args, TVMRetValue* ret) { int64_t i = args[1]; - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); + ICHECK(ptr->IsInstance()); auto* n = static_cast(ptr); - CHECK_LT(static_cast(i), n->size()) << "out of bound of array"; + ICHECK_LT(static_cast(i), n->size()) << "out of bound of array"; *ret = n->at(i); }); TVM_REGISTER_GLOBAL("node.ArraySize").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); + ICHECK(ptr->IsInstance()); *ret = static_cast(static_cast(ptr)->size()); }); @@ -300,7 +300,7 @@ TVM_REGISTER_REFLECTION_VTABLE(MapNode, MapNodeTrait) .set_creator([](const std::string&) -> ObjectPtr { return MapNode::Empty(); }); TVM_REGISTER_GLOBAL("node.Map").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args.size() % 2, 0); + ICHECK_EQ(args.size() % 2, 0); std::unordered_map data; for (int i = 0; i < args.num_args; i += 2) { ObjectRef k = @@ -312,29 +312,29 @@ TVM_REGISTER_GLOBAL("node.Map").set_body([](TVMArgs args, TVMRetValue* ret) { }); TVM_REGISTER_GLOBAL("node.MapSize").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); + ICHECK(ptr->IsInstance()); auto* n = static_cast(ptr); *ret = static_cast(n->size()); }); TVM_REGISTER_GLOBAL("node.MapGetItem").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); + ICHECK(ptr->IsInstance()); auto* n = static_cast(ptr); auto it = n->find(String::CanConvertFrom(args[1]) ? args[1].operator String() : args[1].operator ObjectRef()); - CHECK(it != n->end()) << "cannot find the corresponding key in the Map"; + ICHECK(it != n->end()) << "cannot find the corresponding key in the Map"; *ret = (*it).second; }); TVM_REGISTER_GLOBAL("node.MapCount").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); + ICHECK(ptr->IsInstance()); const MapNode* n = static_cast(ptr); int64_t cnt = n->count(String::CanConvertFrom(args[1]) ? 
args[1].operator String() : args[1].operator ObjectRef()); @@ -342,7 +342,7 @@ TVM_REGISTER_GLOBAL("node.MapCount").set_body([](TVMArgs args, TVMRetValue* ret) }); TVM_REGISTER_GLOBAL("node.MapItems").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* ptr = static_cast(args[0].value().v_handle); auto* n = static_cast(ptr); Array rkvs; diff --git a/src/node/reflection.cc b/src/node/reflection.cc index ec82c91bb652..9dc9d330bb77 100644 --- a/src/node/reflection.cc +++ b/src/node/reflection.cc @@ -50,7 +50,7 @@ class AttrGetter : public AttrVisitor { if (skey == key) *ret = value[0]; } void Visit(const char* key, uint64_t* value) final { - CHECK_LE(value[0], static_cast(std::numeric_limits::max())) + ICHECK_LE(value[0], static_cast(std::numeric_limits::max())) << "cannot return too big constant"; if (skey == key) *ret = static_cast(value[0]); } @@ -198,7 +198,7 @@ class NodeAttrSetter : public AttrVisitor { void InitNodeByPackedArgs(ReflectionVTable* reflection, Object* n, const TVMArgs& args) { NodeAttrSetter setter; setter.type_key = n->GetTypeKey(); - CHECK_EQ(args.size() % 2, 0); + ICHECK_EQ(args.size() % 2, 0); for (int i = 0; i < args.size(); i += 2) { setter.attrs.emplace(args[i].operator std::string(), args[i + 1]); } @@ -245,13 +245,13 @@ ObjectRef ReflectionVTable::CreateObject(const std::string& type_key, // Expose to FFI APIs. void NodeGetAttr(TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* self = static_cast(args[0].value().v_handle); *ret = ReflectionVTable::Global()->GetAttr(self, args[1]); } void NodeListAttrNames(TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + ICHECK_EQ(args[0].type_code(), kTVMObjectHandle); Object* self = static_cast(args[0].value().v_handle); auto names = diff --git a/src/node/serialization.cc b/src/node/serialization.cc index 1f0e8c0f9b00..c7e4d27c8b2c 100644 --- a/src/node/serialization.cc +++ b/src/node/serialization.cc @@ -85,7 +85,7 @@ class NodeIndexer : public AttrVisitor { void Visit(const char* key, runtime::NDArray* value) final { DLTensor* ptr = const_cast((*value).operator->()); if (tensor_index_.count(ptr)) return; - CHECK_EQ(tensor_index_.size(), tensor_list_.size()); + ICHECK_EQ(tensor_index_.size(), tensor_list_.size()); tensor_index_[ptr] = tensor_list_.size(); tensor_list_.push_back(ptr); } @@ -97,10 +97,10 @@ class NodeIndexer : public AttrVisitor { // make index of all the children of node void MakeIndex(Object* node) { if (node == nullptr) return; - CHECK(node->IsInstance()); + ICHECK(node->IsInstance()); if (node_index_.count(node)) return; - CHECK_EQ(node_index_.size(), node_list_.size()); + ICHECK_EQ(node_index_.size(), node_list_.size()); node_index_[node] = node_list_.size(); node_list_.push_back(node); @@ -195,7 +195,7 @@ struct JSONNode { helper.ReadAllFields(reader); if (repr_str.size() != 0) { - CHECK_EQ(repr_b64.size(), 0U); + ICHECK_EQ(repr_b64.size(), 0U); repr_bytes = std::move(repr_str); } else if (repr_b64.size() != 0) { repr_bytes = Base64Decode(repr_b64); @@ -388,13 +388,13 @@ class JSONAttrSetter : public AttrVisitor { void Visit(const char* key, runtime::NDArray* value) final { size_t index; ParseValue(key, &index); - CHECK_LE(index, tensor_list_->size()); + ICHECK_LE(index, tensor_list_->size()); *value = tensor_list_->at(index); } void Visit(const char* key, ObjectRef* value) 
final { size_t index; ParseValue(key, &index); - CHECK_LE(index, node_list_->size()); + ICHECK_LE(index, node_list_->size()); *value = ObjectRef(node_list_->at(index)); } // set node to be current JSONNode @@ -421,13 +421,13 @@ class JSONAttrSetter : public AttrVisitor { if (jnode->type_key == MapNode::_type_key) { std::unordered_map container; if (jnode->keys.empty()) { - CHECK_EQ(jnode->data.size() % 2, 0U); + ICHECK_EQ(jnode->data.size() % 2, 0U); for (size_t i = 0; i < jnode->data.size(); i += 2) { container[ObjectRef(node_list_->at(jnode->data[i]))] = ObjectRef(node_list_->at(jnode->data[i + 1])); } } else { - CHECK_EQ(jnode->data.size(), jnode->keys.size()); + ICHECK_EQ(jnode->data.size(), jnode->keys.size()); for (size_t i = 0; i < jnode->data.size(); ++i) { container[String(jnode->keys[i])] = ObjectRef(node_list_->at(jnode->data[i])); } @@ -530,7 +530,7 @@ struct JSONGraph { } } } - CHECK_EQ(topo_order.size(), n_nodes) << "Cyclic reference detected in JSON file"; + ICHECK_EQ(topo_order.size(), n_nodes) << "Cyclic reference detected in JSON file"; std::reverse(std::begin(topo_order), std::end(topo_order)); return topo_order; } @@ -562,7 +562,7 @@ ObjectRef LoadJSON(std::string json_str) { support::Base64InStream b64strm(&mstrm); b64strm.InitPosition(); runtime::NDArray temp; - CHECK(temp.Load(&b64strm)); + ICHECK(temp.Load(&b64strm)); tensors.emplace_back(std::move(temp)); } } diff --git a/src/node/structural_equal.cc b/src/node/structural_equal.cc index e05cbbb60d1f..1fa72c92b6fc 100644 --- a/src/node/structural_equal.cc +++ b/src/node/structural_equal.cc @@ -90,7 +90,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler { void MarkGraphNode() final { // need to push to pending tasks in this case - CHECK(!allow_push_to_stack_ && !task_stack_.empty()); + ICHECK(!allow_push_to_stack_ && !task_stack_.empty()); task_stack_.back().graph_equal = true; } @@ -108,8 +108,8 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler { equal_map_lhs_.clear(); equal_map_rhs_.clear(); if (!SEqualReduce(lhs, rhs, map_free_vars)) return false; - CHECK_EQ(pending_tasks_.size(), 1U); - CHECK(allow_push_to_stack_); + ICHECK_EQ(pending_tasks_.size(), 1U); + ICHECK(allow_push_to_stack_); task_stack_.emplace_back(std::move(pending_tasks_.back())); pending_tasks_.clear(); return RunTasks(); @@ -141,7 +141,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler { // We can safely mark lhs and rhs as equal to each other. auto it = equal_map_lhs_.find(entry.lhs); if (it != equal_map_lhs_.end()) { - CHECK(it->second.same_as(entry.rhs)); + ICHECK(it->second.same_as(entry.rhs)); } // create the map if the equality is graph equal. if (entry.graph_equal) { @@ -156,7 +156,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler { // Expand the objects // The SEqual of the object can call into this->SEqualReduce // which populates the pending tasks. - CHECK_EQ(pending_tasks_.size(), 0U); + ICHECK_EQ(pending_tasks_.size(), 0U); allow_push_to_stack_ = false; if (!DispatchSEqualReduce(entry.lhs, entry.rhs, entry.map_free_vars)) return false; allow_push_to_stack_ = true; @@ -174,7 +174,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler { // The default equal as registered in the structural equal vtable.
bool DispatchSEqualReduce(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars) { auto compute = [=]() { - CHECK(lhs.defined() && rhs.defined() && lhs->type_index() == rhs->type_index()); + ICHECK(lhs.defined() && rhs.defined() && lhs->type_index() == rhs->type_index()); // skip entries that already have equality maps. auto it = equal_map_lhs_.find(lhs); if (it != equal_map_lhs_.end()) { diff --git a/src/node/structural_hash.cc b/src/node/structural_hash.cc index cb576fa9c067..e0b729d3f103 100644 --- a/src/node/structural_hash.cc +++ b/src/node/structural_hash.cc @@ -79,7 +79,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler { void MarkGraphNode() final { // need to push to pending tasks in this case - CHECK(!allow_push_to_stack_ && !task_stack_.empty()); + ICHECK(!allow_push_to_stack_ && !task_stack_.empty()); task_stack_.back().graph_node_hash = true; } @@ -97,7 +97,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler { } void SHashReduceFreeVar(const runtime::Object* var, bool map_free_vars) final { - CHECK(!hash_memo_.count(GetRef(var))); + ICHECK(!hash_memo_.count(GetRef(var))); if (map_free_vars) { // use counter value. size_t value = std::hash()(free_var_counter_++); @@ -127,19 +127,19 @@ class VarCountingSHashHandler : public SHashReducer::Handler { } size_t Hash(const ObjectRef& object, bool map_free_vars) { - CHECK_EQ(task_stack_.size(), 0U); - CHECK_EQ(pending_tasks_.size(), 0U); - CHECK_EQ(result_stack_.size(), 0U); + ICHECK_EQ(task_stack_.size(), 0U); + ICHECK_EQ(pending_tasks_.size(), 0U); + ICHECK_EQ(result_stack_.size(), 0U); this->SHashReduce(object, map_free_vars); - CHECK_EQ(pending_tasks_.size(), 1U); - CHECK(allow_push_to_stack_); + ICHECK_EQ(pending_tasks_.size(), 1U); + ICHECK(allow_push_to_stack_); task_stack_.emplace_back(std::move(pending_tasks_.back())); pending_tasks_.clear(); this->RunTasks(); - CHECK_EQ(result_stack_.size(), 1U); + ICHECK_EQ(result_stack_.size(), 1U); size_t ret = result_stack_.back(); result_stack_.pop_back(); return ret; @@ -160,7 +160,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler { */ size_t ReduceHash(const Task& task) { size_t stack_begin = task.result_stack_index; - CHECK_LE(stack_begin, result_stack_.size()); + ICHECK_LE(stack_begin, result_stack_.size()); // combine in the reverse order of the stack. size_t reduced_hash = task.reduced_hash; @@ -210,7 +210,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler { entry.children_expanded = true; entry.result_stack_index = result_stack_.size(); - CHECK_EQ(pending_tasks_.size(), 0U); + ICHECK_EQ(pending_tasks_.size(), 0U); allow_push_to_stack_ = false; // dispatch hash, reduce to the current slot. this->DispatchSHash(entry.object, entry.map_free_vars); @@ -227,7 +227,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler { // The default equal as registered in the structural equal vtable. 
void DispatchSHash(const ObjectRef& object, bool map_free_vars) { - CHECK(object.defined()); + ICHECK(object.defined()); vtable_->SHashReduce(object.get(), SHashReducer(this, map_free_vars)); } diff --git a/src/parser/meta_ref.cc b/src/parser/meta_ref.cc index d23892753c5f..c74b396900d8 100644 --- a/src/parser/meta_ref.cc +++ b/src/parser/meta_ref.cc @@ -72,9 +72,9 @@ struct MetaRefExpander : public ExprMutator { if (auto op_node = call->op.as()) { if (op_node->name == "parser.MetaRef") { auto meta_attrs = call->attrs.as(); - CHECK(meta_attrs) << "an internal error has occurred"; + ICHECK(meta_attrs) << "an internal error has occurred"; auto nodes = table.at(meta_attrs->node_type_key); - CHECK_LT(meta_attrs->node_index, nodes.size()); + ICHECK_LT(meta_attrs->node_index, nodes.size()); return Downcast(nodes[meta_attrs->node_index]); } } diff --git a/src/parser/parser.cc b/src/parser/parser.cc index 9c9965ca588f..987a6e20ec38 100644 --- a/src/parser/parser.cc +++ b/src/parser/parser.cc @@ -371,7 +371,7 @@ class Parser { * \return The Nth token. */ Token Lookahead(int n) { - CHECK_GE(n, 1) << "lookahead is only valid when n >= 1"; + ICHECK_GE(n, 1) << "lookahead is only valid when n >= 1"; // We intend to skip n - 1 tokens, then return the nth. auto old_pos = pos; @@ -822,7 +822,7 @@ class Parser { ctor = tvm::Constructor(ctor_name, arg_types, type_global); } - CHECK(ctor.defined()); + ICHECK(ctor.defined()); try { this->ctors.Add(ctor_name, ctor); @@ -944,7 +944,7 @@ class Parser { } } - CHECK_GE(exprs.size(), 1); + ICHECK_GE(exprs.size(), 1); if (exprs.size() == 1) { // ICHECK(exprs[0].defined() && exprs[0]->span.defined()) @@ -1258,7 +1258,7 @@ class Parser { auto op = opt_op[0]; Expr right = WithSpan([this] { return ParseCallExpr(); }); - CHECK(right->span.defined()); + ICHECK(right->span.defined()); // If the operator stack is empty // we parse an operator and expression @@ -1285,7 +1285,7 @@ class Parser { exprs.pop_back(); Expr left = exprs.back(); exprs.pop_back(); - CHECK(new_op.op.defined()) << "a call op must be set " << new_op.op; + ICHECK(new_op.op.defined()) << "a call op must be set " << new_op.op; exprs.push_back( relay::Call(new_op.op, {left, right}, Attrs(), {}, left->span.Merge(right->span))); } @@ -1301,7 +1301,7 @@ class Parser { exprs.pop_back(); Expr left = exprs.back(); exprs.pop_back(); - CHECK(new_op.op.defined()) << "a call op must be set " << new_op.op; + ICHECK(new_op.op.defined()) << "a call op must be set " << new_op.op; exprs.push_back( relay::Call(new_op.op, {left, right}, Attrs(), {}, left->span.Merge(right->span))); } @@ -1369,7 +1369,7 @@ class Parser { } Expr ParseCallArgs(Expr op) { - CHECK(op.defined()) << "the operator must be defined"; + ICHECK(op.defined()) << "the operator must be defined"; DLOG(INFO) << "Parser::ParseCallArgs"; Map raw_attrs; @@ -1401,7 +1401,7 @@ class Parser { if (is_op && op_key.size()) { auto attr_obj = tvm::ReflectionVTable::Global()->CreateObject(op_key, raw_attrs); - CHECK(attr_obj.defined()); + ICHECK(attr_obj.defined()); attrs = Downcast(attr_obj); } @@ -1500,7 +1500,7 @@ class Parser { auto spanned_idents = ParseHierarchicalName(); auto idents = spanned_idents.data; auto span = spanned_idents.span; - CHECK_NE(idents.size(), 0); + ICHECK_NE(idents.size(), 0); std::stringstream op_name; int i = 0; int periods = idents.size() - 1; diff --git a/src/parser/source_map.cc b/src/parser/source_map.cc index 40998b0c9dc4..7ac978cd6341 100644 --- a/src/parser/source_map.cc +++ b/src/parser/source_map.cc @@ -62,7 +62,7 @@ 
Source::Source(SourceName src_name, std::string source) { tvm::String Source::GetLine(int line) { DLOG(INFO) << "Source::GetLine: line=" << line; - CHECK(line - 1 < static_cast((*this)->line_map.size())) + ICHECK(line - 1 < static_cast((*this)->line_map.size())) << "requested line: " << line << " at index: " << (line - 1) << " line_map size: " << (*this)->line_map.size() << " source: " << (*this)->source; diff --git a/src/parser/tokenizer.h b/src/parser/tokenizer.h index 20ad1734e573..a9ae64ba8fb1 100644 --- a/src/parser/tokenizer.h +++ b/src/parser/tokenizer.h @@ -100,7 +100,7 @@ struct Tokenizer { bool More() { return this->pos < this->source.size(); } char Peek() { - CHECK(pos < this->source.size()); + ICHECK(pos < this->source.size()); return this->source.at(this->pos); } @@ -170,7 +170,7 @@ struct Tokenizer { } Token ParseNumber(bool is_pos, bool is_float, std::string number) { - CHECK(number.size() > 0) << "an empty string is an invalid number"; + ICHECK(number.size() > 0) << "an empty string is an invalid number"; try { if (is_float) { @@ -231,22 +231,22 @@ struct Tokenizer { int line = this->line; int column = this->col; - CHECK_EQ(Peek(), '['); + ICHECK_EQ(Peek(), '['); Next(); std::stringstream type_key; while (More() && Peek() != ']') { type_key << Next(); } - CHECK_EQ(Peek(), ']'); + ICHECK_EQ(Peek(), ']'); Next(); - CHECK_EQ(Peek(), '['); + ICHECK_EQ(Peek(), '['); Next(); std::stringstream str_index; while (More() && Peek() != ']') { str_index << Next(); } - CHECK_EQ(Peek(), ']'); + ICHECK_EQ(Peek(), ']'); Next(); // todo: add error handling around bad indices auto index = ParseNumber(true, false, str_index.str()).ToNumber(); @@ -266,7 +266,7 @@ struct Tokenizer { raw_attribute << Next(); } - CHECK_EQ(Next(), ']'); + ICHECK_EQ(Next(), ']'); auto attribute = raw_attribute.str(); // Clean up the white-space on both sides.
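The Peek()/Next() assertions in tokenizer.h guard the tokenizer's own cursor invariant (callers are expected to test More() first), not user-visible input, which is why they move to the internal-check macros. A minimal self-contained sketch of that invariant, assuming the ICHECK macro comes from tvm/support/logging.h; the Cursor type below is illustrative only and is not the actual Tokenizer:

\code
#include <string>

#include <tvm/support/logging.h>  // assumed home of the ICHECK macros

struct Cursor {
  std::string source;
  size_t pos = 0;

  bool More() const { return pos < source.size(); }

  char Peek() const {
    // An out-of-range cursor is a tokenizer bug, never a malformed program,
    // so the internal-invariant macro is the right tool here.
    ICHECK(pos < source.size());
    return source[pos];
  }

  char Next() {
    char c = Peek();  // reuses the guarded read
    ++pos;
    return c;
  }
};
\endcode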
@@ -537,7 +537,7 @@ struct Tokenizer { DLOG(INFO) << "tvm::parser::Tokenize"; while (this->More()) { auto token = TokenizeOnce(); - CHECK(token.defined()); + ICHECK(token.defined()); this->tokens.push_back(token); } this->tokens.push_back(NewToken(TokenType::kEndOfFile)); @@ -576,15 +576,15 @@ std::vector Condense(const std::vector& tokens, Token* table) { i += 1; // TODO(@jroesch): merge spans auto tok = Token(current->span, TokenType::kLocal, next->data); - CHECK(tok.defined()); + ICHECK(tok.defined()); out.push_back(tok); } else if (next->token_type == TokenType::kInteger) { i += 1; auto tok = Token(current->span, TokenType::kGraph, next->data); - CHECK(tok.defined()); + ICHECK(tok.defined()); out.push_back(tok); } else { - CHECK(current.defined()); + ICHECK(current.defined()); out.push_back(current); } continue; @@ -596,10 +596,10 @@ std::vector Condense(const std::vector& tokens, Token* table) { i += 1; // TODO(@jroesch): merge spans auto tok = Token(current->span, TokenType::kGlobal, next->data); - CHECK(tok.defined()); + ICHECK(tok.defined()); out.push_back(tok); } else { - CHECK(current.defined()); + ICHECK(current.defined()); out.push_back(current); } continue; @@ -638,7 +638,7 @@ std::pair, Token> Tokenize(const DiagnosticContext& ctx, cons Token meta_table(Span(), TokenType::kUnknown, ObjectRef()); auto tokens = Condense(tokenizer.tokens, &meta_table); for (auto token : tokens) { - CHECK(token.defined()); + ICHECK(token.defined()); } return {tokens, meta_table}; } diff --git a/src/printer/doc.cc b/src/printer/doc.cc index ab1eddbe7d1e..4b22d54448c2 100644 --- a/src/printer/doc.cc +++ b/src/printer/doc.cc @@ -85,7 +85,7 @@ class DocLine : public DocAtom { // DSL function implementations Doc& Doc::operator<<(const Doc& right) { - CHECK(this != &right); + ICHECK(this != &right); this->stream_.insert(this->stream_.end(), right.stream_.begin(), right.stream_.end()); return *this; } diff --git a/src/printer/meta_data.h b/src/printer/meta_data.h index df27d92170c6..233da1baffd8 100644 --- a/src/printer/meta_data.h +++ b/src/printer/meta_data.h @@ -99,7 +99,7 @@ class TextMetaDataContext { return it->second; } std::string type_key = node->GetTypeKey(); - CHECK(!type_key.empty()); + ICHECK(!type_key.empty()); Array& mvector = meta_data_[type_key]; int64_t index = static_cast(mvector.size()); mvector.push_back(node); diff --git a/src/printer/relay_text_printer.cc b/src/printer/relay_text_printer.cc index 555d335a51da..4132ab14ff29 100644 --- a/src/printer/relay_text_printer.cc +++ b/src/printer/relay_text_printer.cc @@ -322,7 +322,7 @@ Doc RelayTextPrinter::VisitExpr_(const ConstantNode* op) { if (op->is_scalar()) { std::ostringstream os; DataType dtype = DataType(op->data->dtype); - CHECK_EQ(op->data->ctx.device_type, kDLCPU); + ICHECK_EQ(op->data->ctx.device_type, kDLCPU); if (dtype == DataType::Int(32)) { return ScalarLiteral(dtype, static_cast(op->data->data)[0]); } else if (dtype == DataType::Int(64)) { @@ -831,7 +831,7 @@ std::vector RelayTextPrinter::PrintFuncAttrs(const Attrs& attrs) { std::vector docs; if (!attrs.defined()) return docs; const auto* dict_attrs = attrs.as(); - CHECK(dict_attrs); + ICHECK(dict_attrs); for (const auto& k : dict_attrs->dict) { Doc doc; doc << k.first << "=" << Print(k.second); diff --git a/src/printer/tir_text_printer.cc b/src/printer/tir_text_printer.cc index 7feb0b5031ab..107817db29b3 100644 --- a/src/printer/tir_text_printer.cc +++ b/src/printer/tir_text_printer.cc @@ -353,7 +353,7 @@ Doc TIRTextPrinter::VisitExpr_(const CallNode* op) { } else { // 
TODO(bohan): Print out the name by the global var in the module. auto* op_gvar = op->op.as(); - CHECK(op_gvar != nullptr); + ICHECK(op_gvar != nullptr); doc << "@" << Doc::Text(op_gvar->name_hint) << "("; } std::vector args; diff --git a/src/printer/tvmscript_printer.cc b/src/printer/tvmscript_printer.cc index 5add7c17b04c..09f95e44b6d8 100644 --- a/src/printer/tvmscript_printer.cc +++ b/src/printer/tvmscript_printer.cc @@ -475,7 +475,7 @@ Doc TVMScriptPrinter::VisitExpr_(const CallNode* op) { doc << Doc::Text(ptr_op->name) << "("; } else { auto* op_gvar = op->op.as(); - CHECK(op_gvar != nullptr); + ICHECK(op_gvar != nullptr); doc << Doc::Text(op_gvar->name_hint) << "("; } std::vector args; @@ -566,7 +566,7 @@ Doc TVMScriptPrinter::VisitStmt_(const AttrStmtNode* op) { // concise thread env if (op->node->IsInstance() && op->attr_key == "thread_extent") { const auto* iter_var = Downcast(op->node).get(); - CHECK(!iter_var->dom.defined()); + ICHECK(!iter_var->dom.defined()); var_not_in_headers.insert(iter_var->var.get()); var_env_map_[iter_var->var] = iter_var->thread_tag; if (current_num_ != num_child_ - 1) { @@ -890,7 +890,7 @@ Doc TVMScriptPrinter::PrintBuffer(const BufferNode* op) { TVM_REGISTER_GLOBAL("script.AsTVMScript") .set_body_typed([](const ObjectRef& functions, bool show_meta) { - CHECK(functions.as() != nullptr || functions.as() != nullptr); + ICHECK(functions.as() != nullptr || functions.as() != nullptr); return "@tvm.script.tir\n" + TVMScriptPrinter(show_meta).Print(functions).str() + "\n"; }); diff --git a/src/relay/analysis/annotated_region_set.cc b/src/relay/analysis/annotated_region_set.cc index 587add36706f..04a18c4b7351 100644 --- a/src/relay/analysis/annotated_region_set.cc +++ b/src/relay/analysis/annotated_region_set.cc @@ -119,7 +119,7 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor { } auto arg_region = region_set_->GetRegion(arg); - CHECK_EQ(region.defined(), arg_region.defined()) + ICHECK_EQ(region.defined(), arg_region.defined()) << "Arg regions are inconsistent: " << AsText(expr); if (region.defined() && region != arg_region) { region_set_->MergeRegions(arg_region, region); @@ -137,21 +137,21 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor { AddToArgRegion(GetRef(call), call->args); } else if (call->op == begin_op_) { // The annotation node is inserted on edge so it must have only one argument. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); std::string target = call->attrs.as()->compiler; // Check if the argument already belongs to a region auto region = region_set_->GetRegion(GetRef(call)); - CHECK(!region.defined()); + ICHECK(!region.defined()); // Create a new region. region = region_set_->MakeRegion(target); region->nodes_.insert(GetRef(call)); region->ins_.push_back(GetRef(call)); } else { - CHECK_EQ(call->op, end_op_); + ICHECK_EQ(call->op, end_op_); // The annotation node is inserted on edge so it must have only one argument. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); std::string target = call->attrs.as()->compiler; // Check if the argument already belongs to a region @@ -162,7 +162,7 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor { } else { // If the argument belongs to a region, it must have the same target. // Otherwise we should see a region_begin op.
- CHECK_EQ(region->GetTarget(), target); + ICHECK_EQ(region->GetTarget(), target); } region->nodes_.insert(GetRef(call)); region->outs_.push_back(GetRef(call)); diff --git a/src/relay/analysis/annotated_region_set.h b/src/relay/analysis/annotated_region_set.h index cbcf155350df..d9923cca99fc 100644 --- a/src/relay/analysis/annotated_region_set.h +++ b/src/relay/analysis/annotated_region_set.h @@ -114,7 +114,7 @@ class AnnotatedRegion : public ObjectRef { /*! \return Mutable pointers to the node. */ AnnotatedRegionNode* operator->() const { auto* ptr = get_mutable(); - CHECK(ptr != nullptr); + ICHECK(ptr != nullptr); return static_cast(ptr); } }; @@ -216,39 +216,39 @@ class AnnotatedRegionSet : public ObjectRef { /*! \return The begin iterator. */ iterator begin() { auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->begin(); } /*! \return The end iterator. */ iterator end() { auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->end(); } /*! \return The begin iterator. */ const_iterator begin() const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->begin(); } /*! \return The end iterator. */ const_iterator end() const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->end(); } /*! \return mutable pointers to the node. */ AnnotatedRegionSetNode* operator->() const { auto* ptr = get_mutable(); - CHECK(ptr != nullptr); + ICHECK(ptr != nullptr); return static_cast(ptr); } /*! \return The region an expression belongs to. */ AnnotatedRegion operator[](const Expr& expr) { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->GetRegion(expr); } diff --git a/src/relay/analysis/call_graph.cc b/src/relay/analysis/call_graph.cc index 0d3fedcde0f7..9edb471f7f79 100644 --- a/src/relay/analysis/call_graph.cc +++ b/src/relay/analysis/call_graph.cc @@ -51,7 +51,7 @@ CallGraph::CallGraph(IRModule module) { } void CallGraphNode::AddToCallGraph(const GlobalVar& gv, const Function& func) { - CHECK(func.defined() && gv.defined()); + ICHECK(func.defined() && gv.defined()); // Add the current global function as an entry to the call graph. CallGraphEntry* cg_node = LookupGlobalVar(gv); @@ -73,20 +73,20 @@ void CallGraphNode::AddToCallGraph(const GlobalVar& gv, const Function& func) { const CallGraphEntry* CallGraphNode::operator[](const GlobalVar& gv) const { const_iterator cit = call_graph_.find(gv); - CHECK(cit != call_graph_.end()) << "GlobalVar " << gv->name_hint - << " not found in the call graph!"; + ICHECK(cit != call_graph_.end()) + << "GlobalVar " << gv->name_hint << " not found in the call graph!"; return cit->second.get(); } CallGraphEntry* CallGraphNode::operator[](const GlobalVar& gv) { const_iterator cit = call_graph_.find(gv); - CHECK(cit != call_graph_.end()) << "GlobalVar " << gv->name_hint - << " not found in the call graph!"; + ICHECK(cit != call_graph_.end()) + << "GlobalVar " << gv->name_hint << " not found in the call graph!"; return cit->second.get(); } BaseFunc CallGraphNode::GetGlobalFunction(const GlobalVar& var) const { - CHECK(module->ContainGlobalVar(var->name_hint)) + ICHECK(module->ContainGlobalVar(var->name_hint)) << "GlobalVar " << var->name_hint << " not found in the current ir module"; return module->Lookup(var); } @@ -94,13 +94,13 @@ BaseFunc CallGraphNode::GetGlobalFunction(const GlobalVar& var) const { // Query the existence of a GlobalVar in the call graph. It creates an entry if // there is no such node available.
CallGraphEntry* CallGraphNode::LookupGlobalVar(const GlobalVar& gv) { - CHECK(gv.defined()); + ICHECK(gv.defined()); // This inserts an element to the call graph if it is not there yet. auto& call_graph_node = call_graph_[gv]; if (call_graph_node) return call_graph_node.get(); - CHECK(module->ContainGlobalVar(gv->name_hint)) + ICHECK(module->ContainGlobalVar(gv->name_hint)) << "GlobalVar " << gv->name_hint << " not found in the current ir module"; // Create the node for the inserted entry. @@ -118,7 +118,7 @@ void CallGraphNode::Print(std::ostream& os) const { GlobalVar CallGraphNode::RemoveGlobalVarFromModule(CallGraphEntry* cg_node, bool update_call_graph) { - CHECK(cg_node->empty() || (cg_node->IsRecursive() && cg_node->size() == 1)) + ICHECK(cg_node->empty() || (cg_node->IsRecursive() && cg_node->size() == 1)) << "Cannot remove global var " << cg_node->GetNameHint() << " from call graph, because it still calls " << cg_node->size() << " other global functions"; @@ -232,7 +232,7 @@ inline void CallGraphEntry::AddCalledGlobal(CallGraphEntry* cg_node) { // Remove an edge from the current global function to the callee. void CallGraphEntry::RemoveCallTo(const GlobalVar& callee) { for (auto it = begin();; ++it) { - CHECK(it != end()) << "Cannot find global function " << callee->name_hint << " to remove!"; + ICHECK(it != end()) << "Cannot find global function " << callee->name_hint << " to remove!"; if (it->second->GetGlobalVar() == callee) { // Only remove one occurrence of the call site. it->second->DecRef(); @@ -256,7 +256,7 @@ void CallGraphEntry::RemoveAllCallTo(CallGraphEntry* callee) { } } // Make sure all references to the callee are removed. - CHECK_EQ(callee->GetRefCount(), 0U) + ICHECK_EQ(callee->GetRefCount(), 0U) << "All references to " << callee->GetNameHint() << " should have been removed"; } @@ -291,7 +291,7 @@ TVM_REGISTER_NODE_TYPE(CallGraphNode); TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) .set_dispatch([](const ObjectRef& ref, ReprPrinter* p) { auto* node = static_cast(ref.get()); - CHECK(node); + ICHECK(node); p->stream << "CallGraph: \n" << GetRef(node); }); diff --git a/src/relay/analysis/call_graph.h b/src/relay/analysis/call_graph.h index 07b25278b1d6..7cc813ebbff1 100644 --- a/src/relay/analysis/call_graph.h +++ b/src/relay/analysis/call_graph.h @@ -218,25 +218,25 @@ class CallGraph : public ObjectRef { /*! \return The begin iterator. */ iterator begin() { auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->begin(); } /*! \return The end iterator. */ iterator end() { auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->end(); } /*! \return The begin iterator. */ const_iterator begin() const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->begin(); } /*! \return The end iterator. */ const_iterator end() const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return n->end(); } @@ -249,7 +249,7 @@ class CallGraph : public ObjectRef { */ const CallGraphEntry* operator[](const GlobalVar& gv) const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return (*n)[gv]; } /*! @@ -261,7 +261,7 @@ class CallGraph : public ObjectRef { */ CallGraphEntry* operator[](const GlobalVar& gv) { auto* n = operator->(); - CHECK(n); + ICHECK(n); return (*n)[gv]; } /*! @@ -273,7 +273,7 @@ class CallGraph : public ObjectRef { */ const CallGraphEntry* operator[](const std::string& gvar_name) const { const auto* n = operator->(); - CHECK(n); + ICHECK(n); return (*n)[gvar_name]; } /*! 
@@ -285,14 +285,14 @@ class CallGraph : public ObjectRef { */ CallGraphEntry* operator[](const std::string& gvar_name) { auto* n = operator->(); - CHECK(n); + ICHECK(n); return (*n)[gvar_name]; } /*! \return mutable pointers to the node. */ CallGraphNode* operator->() const { auto* ptr = get_mutable(); - CHECK(ptr != nullptr); + ICHECK(ptr != nullptr); return static_cast(ptr); } @@ -360,7 +360,7 @@ class CallGraphEntry { * \return The fetched CallGraphEntry. */ CallGraphEntry* operator[](size_t i) const { - CHECK_LT(i, called_globals_.size()) << "Invalid Index"; + ICHECK_LT(i, called_globals_.size()) << "Invalid Index"; return called_globals_[i].second; } @@ -452,7 +452,7 @@ class CallGraphEntry { private: /*! \brief Decrement the reference counter by 1. */ void DecRef() { - CHECK_GT(ref_cnt_, 0); + ICHECK_GT(ref_cnt_, 0); --ref_cnt_; } /*! \brief Increment the reference counter by 1. */ diff --git a/src/relay/analysis/context_analysis.cc b/src/relay/analysis/context_analysis.cc index 5fbd8a4d067f..a648b7af8fd3 100644 --- a/src/relay/analysis/context_analysis.cc +++ b/src/relay/analysis/context_analysis.cc @@ -151,7 +151,7 @@ DeviceDomainPtr Join(const DeviceDomainPtr& lhs, const DeviceDomainPtr& rhs) { } else if (rhs->IsEmptyDomain()) { return lhs; } else { - CHECK(*lhs.get() == *rhs.get()) << "All expressions must have a singular device to unify"; + ICHECK(*lhs.get() == *rhs.get()) << "All expressions must have a singular device to unify"; return lhs; } } @@ -311,7 +311,7 @@ class ContextAnalyzer : public MixedModeVisitor { auto ty = let->value->checked_type(); if (ty->IsInstance()) { auto gv = ExtractClosure(let); - CHECK(gv.defined() && gv->IsInstance()); + ICHECK(gv.defined() && gv->IsInstance()); closures_[let->var] = Downcast(gv); } @@ -444,7 +444,7 @@ class ContextAnalyzer : public MixedModeVisitor { // Process device copy call node void UnifyDeviceCopyCall(const CallNode* call) { - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); std::vector inps{call->args[0]}; std::vector outs{GetRef(call)}; @@ -455,13 +455,13 @@ class ContextAnalyzer : public MixedModeVisitor { inps.push_back(fn->params[0]); outs.push_back(call->op); Expr body = fn->body; - CHECK(body->IsInstance() && IsDeviceCopy(body)); + ICHECK(body->IsInstance() && IsDeviceCopy(body)); Call call_body = Downcast(body); attrs = call_body->attrs.as(); } else { attrs = call->attrs.as(); } - CHECK(attrs != nullptr); + ICHECK(attrs != nullptr); src_dev_type = static_cast(attrs->src_dev_type); dst_dev_type = static_cast(attrs->dst_dev_type); @@ -474,7 +474,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyAllocStorageCall(const CallNode* call) { // [size, alignment] - CHECK_EQ(call->args.size(), 2U); + ICHECK_EQ(call->args.size(), 2U); // The arguments of alloc storage should be on CPU. 
for (int i = 0; i < 2; i++) { @@ -490,7 +490,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyAllocTensorCall(const CallNode* call) { // [storage, offset, shape] - CHECK_EQ(call->args.size(), 3U); + ICHECK_EQ(call->args.size(), 3U); Expr storage = call->args[0]; Expr shape = call->args[1]; @@ -503,7 +503,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyShapeFuncCall(const CallNode* call) { // [func, inputs, outputs] - CHECK_EQ(call->args.size(), 3U); + ICHECK_EQ(call->args.size(), 3U); auto shape_func_domain = DeviceType(cpu_ctx_); // No need to unify the op of a shape_func as shape_func doesn't @@ -523,7 +523,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyInvokeTVMOpCall(const CallNode* call) { // [op, inputs, outputs] - CHECK_EQ(call->args.size(), 3U); + ICHECK_EQ(call->args.size(), 3U); Tuple inps = Downcast(call->args[1]); Tuple outputs = Downcast(call->args[2]); UnifyCall(call->args[0], inps->fields, outputs->fields, Bottom()); @@ -532,7 +532,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyShapeOfCall(const CallNode* call) { // vm shape_of is always on the CPU. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); MixedModeVisitor::VisitExpr(call->args[0]); // Note we don't unify the input of a shape_of with the cpu domain. This is // because vm.shape_of has a native instruction to compute the shape of @@ -544,7 +544,7 @@ class ContextAnalyzer : public MixedModeVisitor { void UnifyReshapeTensorCall(const CallNode* call) { // [data, shape] - CHECK_EQ(call->args.size(), 2U); + ICHECK_EQ(call->args.size(), 2U); Expr data = call->args[0]; Expr shape = call->args[1]; Unify(DeviceFor(GetRef(call)), DeviceFor(data)); @@ -583,10 +583,10 @@ class ContextAnalyzer : public MixedModeVisitor { // Invoke a global function. void UnifyGlobalVarCall(const CallNode* call) { auto device = DeviceFor(GetRef(call)); - CHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module"; + ICHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module"; GlobalVar gv = Downcast(call->op); auto func = Downcast(mod_->Lookup(gv)); - CHECK_EQ(call->args.size(), func->params.size()) + ICHECK_EQ(call->args.size(), func->params.size()) << "The number of arguments doesn't match the number of parameters of the function."; for (size_t i = 0; i < call->args.size(); i++) { @@ -596,14 +596,14 @@ class ContextAnalyzer : public MixedModeVisitor { // Save the arg to function mapping for closures as it will // be invoked/unified later. - CHECK(arg->checked_type().defined()) + ICHECK(arg->checked_type().defined()) << "Type inference is required to run the context analysis passes."; if (arg->checked_type()->IsInstance()) { auto it = closures_.find(arg); if (it != closures_.end()) { closures_[param] = it->second; } else { - CHECK(arg->IsInstance()); + ICHECK(arg->IsInstance()); closures_[param] = Downcast(arg); } } @@ -631,9 +631,9 @@ class ContextAnalyzer : public MixedModeVisitor { // Unify the corresponding argument and parameter.
auto device = DeviceFor(GetRef(call)); auto it = closures_.find(call->op); - CHECK(it != closures_.end()) << "Cannot find var: " << call->op; + ICHECK(it != closures_.end()) << "Cannot find var: " << call->op; auto glb_var = it->second; - CHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module"; + ICHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module"; Function func = Downcast(mod_->Lookup(glb_var)); // Unify the underlying function for closure or currying functions. while (IsClosure(func) || IsCurrying(func)) { @@ -648,7 +648,7 @@ class ContextAnalyzer : public MixedModeVisitor { } } - CHECK_EQ(call->args.size(), func->params.size()); + ICHECK_EQ(call->args.size(), func->params.size()); for (size_t i = 0; i < call->args.size(); i++) { Unify(DeviceFor(call->args[i]), DeviceFor(func->params[i])); MixedModeVisitor::VisitExpr(call->args[i]); diff --git a/src/relay/analysis/dependency_graph.cc b/src/relay/analysis/dependency_graph.cc index de61800d8c52..3a4fb59475a4 100644 --- a/src/relay/analysis/dependency_graph.cc +++ b/src/relay/analysis/dependency_graph.cc @@ -50,7 +50,7 @@ class DependencyGraph::Creator : private ExprFunctor { void Depend(DependencyGraph::Node* parent, const Expr& child) { VisitExpr(child); - CHECK_NE(graph_.expr_node.count(child), 0); + ICHECK_NE(graph_.expr_node.count(child), 0); Depend(parent, graph_.expr_node[child]); } diff --git a/src/relay/analysis/feature.cc b/src/relay/analysis/feature.cc index b3516e965b85..f72b4e105749 100644 --- a/src/relay/analysis/feature.cc +++ b/src/relay/analysis/feature.cc @@ -114,7 +114,7 @@ std::string FeatureSet::ToString() const { DETECT_FEATURE(fGraph); DETECT_FEATURE(fLetRec); #undef DETECT_FEATURE - CHECK(detected == feature_count) << "some feature not printed"; + ICHECK(detected == feature_count) << "some feature not printed"; ret += "]"; return ret; } @@ -139,8 +139,8 @@ TVM_REGISTER_GLOBAL("relay.analysis.detect_feature").set_body_typed(PyDetectFeat void CheckFeature(const Expr& expr, const FeatureSet& fs) { auto dfs = DetectFeature(expr); - CHECK(dfs.is_subset_of(fs)) << AsText(expr, false) - << "\nhas unsupported feature: " << (dfs - fs).ToString(); + ICHECK(dfs.is_subset_of(fs)) << AsText(expr, false) + << "\nhas unsupported feature: " << (dfs - fs).ToString(); } void CheckFeature(const IRModule& mod, const FeatureSet& fs) { diff --git a/src/relay/analysis/get_calibration_data.cc b/src/relay/analysis/get_calibration_data.cc index 34d0d0002b6a..70fe2a68f21e 100644 --- a/src/relay/analysis/get_calibration_data.cc +++ b/src/relay/analysis/get_calibration_data.cc @@ -52,7 +52,7 @@ class Collector : public ExprRewriter { // intrinsic functions are excluded for now if (call->op->IsInstance()) { auto var = Downcast(call->op); - CHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined"; + ICHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined"; // we only handle functions with Compiler attribute set auto func = Downcast(module_->Lookup(var)); if (func->GetAttr(attr::kCompiler)) { @@ -74,10 +74,10 @@ class Collector : public ExprRewriter { Expr FlattenOutputTuple(const Array& exprs) { Array fields; for (const auto& it : exprs) { - CHECK(it->checked_type_.defined()); + ICHECK(it->checked_type_.defined()); if (auto* tn = it->checked_type_.as()) { // TODO(seanlatias): for now input argument cannot be a tuple - CHECK(it->IsInstance()); + ICHECK(it->IsInstance()); for (size_t i = 0; i < tn->fields.size(); i++) {
fields.push_back(TupleGetItem(it, i)); } @@ -140,8 +140,8 @@ class OutputMapper : public ExprRewriter { Expr Rewrite_(const CallNode* call, const Expr& post) final { if (call->op->IsInstance()) { auto var = Downcast(call->op); - CHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined"; - CHECK_EQ(output_map_->count(var), 0) + ICHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined"; + ICHECK_EQ(output_map_->count(var), 0) << "Repeated function call " << var << " is not supported."; auto func = Downcast(module_->Lookup(var)); // we only handle functions with Compiler attribute set diff --git a/src/relay/analysis/mac_count.cc b/src/relay/analysis/mac_count.cc index 5e35ab7ba62d..29edf55812cc 100644 --- a/src/relay/analysis/mac_count.cc +++ b/src/relay/analysis/mac_count.cc @@ -65,24 +65,24 @@ int64_t ConvMacCount(const Call& call_node) { return 0; } Array args = call_node->args; - CHECK_EQ(args.size(), 2) << "The number of input arguments of a CONV 2D node should be 2."; + ICHECK_EQ(args.size(), 2) << "The number of input arguments of a CONV 2D node should be 2."; const auto* conv_2d_attr = call_node->attrs.as(); const auto* data_type = args[0]->checked_type().as(); Array data_shape = data_type->shape; std::string data_layout = conv_2d_attr->data_layout; int32_t C_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('C')); int32_t c_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('c')); - CHECK_NE(C_ind, -1) << "There is no input channel dimension."; + ICHECK_NE(C_ind, -1) << "There is no input channel dimension."; int64_t input_channel = static_cast(data_shape[C_ind].as()->value); if (c_ind != -1) input_channel *= static_cast(data_shape[c_ind].as()->value); Array kernel_size = conv_2d_attr->kernel_size; - CHECK_EQ(kernel_size.size(), 2) << "The dimension of the kernel in Conv 2D should be 2."; + ICHECK_EQ(kernel_size.size(), 2) << "The dimension of the kernel in Conv 2D should be 2."; const auto* expr = call_node->checked_type().as(); Array output_tensor = expr->shape; - CHECK(output_tensor.size() == 4 || output_tensor.size() == 5) + ICHECK(output_tensor.size() == 4 || output_tensor.size() == 5) << "The dimension of the output tensor in Conv 2D should be 4 or 5."; int64_t count = GetCartesianProd(output_tensor) * GetCartesianProd(kernel_size); - CHECK_EQ(input_channel % conv_2d_attr->groups, 0) + ICHECK_EQ(input_channel % conv_2d_attr->groups, 0) << "The number of input channels is not divisible by groups."; count *= input_channel / conv_2d_attr->groups; return count; @@ -94,7 +94,7 @@ int64_t Conv2dTransposeMacCount(const Call& call_node) { return 0; } Array args = call_node->args; - CHECK_EQ(args.size(), 2) + ICHECK_EQ(args.size(), 2) << "The number of input arguments of a CONV 2D Transpose node should be 2."; const auto* conv_2d_transpose_attr = call_node->attrs.as(); const auto* data_type = args[0]->checked_type().as(); @@ -102,18 +102,18 @@ int64_t Conv2dTransposeMacCount(const Call& call_node) { std::string data_layout = conv_2d_transpose_attr->data_layout; int32_t C_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('C')); int32_t c_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('c')); - CHECK_NE(C_ind, -1) << "There is no input channel dimension."; + ICHECK_NE(C_ind, -1) << "There is no input channel dimension."; int64_t input_channel = static_cast(data_shape[C_ind].as()->value); if (c_ind != -1) input_channel *= static_cast(data_shape[c_ind].as()->value); Array kernel_size = conv_2d_transpose_attr->kernel_size;
- CHECK_EQ(kernel_size.size(), 2) + ICHECK_EQ(kernel_size.size(), 2) << "The dimension of the kernel in Conv 2D Transpose should be 2."; const auto* expr = call_node->checked_type().as(); Array output_tensor = expr->shape; - CHECK(output_tensor.size() == 4 || output_tensor.size() == 5) + ICHECK(output_tensor.size() == 4 || output_tensor.size() == 5) << "The dimension of the output tensor in Conv 2D Transpose should be 4 or 5."; int64_t count = GetCartesianProd(output_tensor) * GetCartesianProd(kernel_size); - CHECK_EQ(input_channel % conv_2d_transpose_attr->groups, 0) + ICHECK_EQ(input_channel % conv_2d_transpose_attr->groups, 0) << "The number of input channels is not divisible by groups."; count *= input_channel / conv_2d_transpose_attr->groups; return count; @@ -125,18 +125,18 @@ int64_t DenseMacCount(const Call& call_node) { return 0; } Array args = call_node->args; - CHECK_EQ(args.size(), 2) << "The number of input arguments of a Dense node should be 2."; + ICHECK_EQ(args.size(), 2) << "The number of input arguments of a Dense node should be 2."; const auto* data_type = args[0]->checked_type().as(); const auto* weight_type = args[1]->checked_type().as(); Array data_shape = data_type->shape; Array weight_shape = weight_type->shape; - CHECK(data_shape.size() == 2 && weight_shape.size() == 2) + ICHECK(data_shape.size() == 2 && weight_shape.size() == 2) << "The dimension of an input tensor to Dense node should be 2."; int64_t d1 = static_cast(data_shape[0].as()->value); int64_t d2 = static_cast(data_shape[1].as()->value); int64_t d3 = static_cast(weight_shape[0].as()->value); int64_t d4 = static_cast(weight_shape[1].as()->value); - CHECK_EQ(d2, d4) << "The dimensions of input arguments do not match."; + ICHECK_EQ(d2, d4) << "The dimensions of input arguments do not match."; int64_t count = d1 * d2 * d3; return count; } @@ -147,7 +147,7 @@ int64_t BatchMatmulMacCount(const Call& call_node) { return 0; } Array args = call_node->args; - CHECK_EQ(args.size(), 2); + ICHECK_EQ(args.size(), 2); Array x_shape = args[0]->checked_type().as()->shape; Array y_shape = args[1]->checked_type().as()->shape; int64_t batch = x_shape[0].as()->value; diff --git a/src/relay/analysis/match_exhaustion.cc b/src/relay/analysis/match_exhaustion.cc index e852c40dfeba..bb6e8f14ca09 100644 --- a/src/relay/analysis/match_exhaustion.cc +++ b/src/relay/analysis/match_exhaustion.cc @@ -68,7 +68,7 @@ class CandidateChecker : public PatternFunctorpatterns.size(), ctor_cand->patterns.size()); + ICHECK_EQ(op->patterns.size(), ctor_cand->patterns.size()); bool unspecified = false; for (size_t i = 0; i < op->patterns.size(); i++) { MatchResult submatch = this->Check(op->patterns[i], ctor_cand->patterns[i]); @@ -95,7 +95,7 @@ class CandidateChecker : public PatternFunctorpatterns.size(), tuple_cand->patterns.size()); + ICHECK_EQ(op->patterns.size(), tuple_cand->patterns.size()); bool unspecified = false; for (size_t i = 0; i < op->patterns.size(); i++) { MatchResult submatch = this->Check(op->patterns[i], tuple_cand->patterns[i]); @@ -126,7 +126,7 @@ class CandidateChecker : public PatternFunctor> CartesianProduct(Array> fields) { - CHECK_NE(fields.size(), 0); + ICHECK_NE(fields.size(), 0); Array field_vals = fields[fields.size() - 1]; Array> ret; diff --git a/src/relay/analysis/type_solver.cc b/src/relay/analysis/type_solver.cc index 8c1cc92fe009..55f736895018 100644 --- a/src/relay/analysis/type_solver.cc +++ b/src/relay/analysis/type_solver.cc @@ -114,14 +114,14 @@ class TypeSolver::Unifier : public TypeFunctor { } if
(lhs->resolved_type.as()) { - CHECK(!OccursCheck(lhs, rhs->resolved_type)) + ICHECK(!OccursCheck(lhs, rhs->resolved_type)) << "Incomplete type " << lhs->resolved_type << " occurs in " << rhs->resolved_type << ", cannot unify"; solver_->MergeFromTo(lhs, rhs); return rhs->resolved_type; } else if (rhs->resolved_type.as()) { - CHECK(!OccursCheck(rhs, lhs->resolved_type)) + ICHECK(!OccursCheck(rhs, lhs->resolved_type)) << "Incomplete type " << rhs->resolved_type << " occurs in " << lhs->resolved_type << ", cannot unify"; solver_->MergeFromTo(rhs, lhs); @@ -242,7 +242,7 @@ class TypeSolver::Unifier : public TypeFunctor { std::vector> mismatches; - CHECK_EQ(tt1->shape.size(), tt2->shape.size()); + ICHECK_EQ(tt1->shape.size(), tt2->shape.size()); for (size_t i = 0; i < tt1->shape.size(); i++) { auto dim = UnifyDim(tt1->shape[i], tt2->shape[i]); if (!dim.defined()) { @@ -328,8 +328,8 @@ class TypeSolver::Unifier : public TypeFunctor { for (size_t i = 0; i < ft1->type_constraints.size(); ++i) { Type unified_constraint = Unify(ft1->type_constraints[i], ft2->type_constraints[i]); const auto* tcn = unified_constraint.as(); - CHECK(tcn) << "Two type constraints unified into a non-constraint?" - << ft1->type_constraints[i] << " and " << ft2->type_constraints[i]; + ICHECK(tcn) << "Two type constraints unified into a non-constraint?" + << ft1->type_constraints[i] << " and " << ft2->type_constraints[i]; type_constraints.push_back(GetRef(tcn)); } @@ -527,7 +527,7 @@ TypeSolver::TypeSolver(const GlobalVar& current_func, DiagnosticContext diag_ctx current_func(current_func), diag_ctx_(diag_ctx), module_(diag_ctx->module) { - CHECK(module_.defined()); + ICHECK(module_.defined()); } // destructor @@ -593,12 +593,12 @@ bool TypeSolver::Solve() { RelationNode* rnode = update_queue_.front(); const auto& rel = rnode->rel; update_queue_.pop(); - CHECK(!rnode->resolved); + ICHECK(!rnode->resolved); // update the relation with given evidence. 
Array args; for (auto* tlink = rnode->type_list.head; tlink != nullptr; tlink = tlink->next) { args.push_back(Resolve(tlink->value->FindRoot()->resolved_type)); - CHECK_LE(args.size(), rel->args.size()); + ICHECK_LE(args.size(), rel->args.size()); } // We need to set this in order to understand where unification diff --git a/src/relay/analysis/type_solver.h b/src/relay/analysis/type_solver.h index 1fc0525d6bca..4ae2e6a2b07b 100644 --- a/src/relay/analysis/type_solver.h +++ b/src/relay/analysis/type_solver.h @@ -208,7 +208,7 @@ class TypeSolver { */ void AddToQueue(RelationNode* rel) { if (rel->inqueue) return; - CHECK(!rel->resolved); + ICHECK(!rel->resolved); rel->inqueue = true; update_queue_.push(rel); } diff --git a/src/relay/analysis/util.cc b/src/relay/analysis/util.cc index edf8fb644c57..bcfbc83da514 100644 --- a/src/relay/analysis/util.cc +++ b/src/relay/analysis/util.cc @@ -358,9 +358,9 @@ std::unordered_map GetExprRefCount(const Expr& body) { template bool IsNDArrayAllGreaterEqual(const runtime::NDArray& tensor, T value) { - CHECK_EQ(tensor->ctx.device_type, kDLCPU); - CHECK(tensor->strides == nullptr); - CHECK_EQ(tensor->byte_offset, 0); + ICHECK_EQ(tensor->ctx.device_type, kDLCPU); + ICHECK(tensor->strides == nullptr); + ICHECK_EQ(tensor->byte_offset, 0); const T* data = static_cast(tensor->data); int64_t num_elems = 1; for (int i = 0; i < tensor->ndim; ++i) { @@ -446,10 +446,10 @@ Expr TypeSubst(const Expr& expr, const tvm::Map& subst_map) { private: const tvm::Map& subst_map_; }; - CHECK(WellFormed(expr)); + ICHECK(WellFormed(expr)); auto ret = TypeSubstMutator(subst_map).VisitExpr(expr); - CHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size()); - CHECK(WellFormed(ret)); + ICHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size()); + ICHECK(WellFormed(ret)); return ret; } diff --git a/src/relay/analysis/well_formed.cc b/src/relay/analysis/well_formed.cc index 0b6e043a0d21..856c5dc7aac1 100644 --- a/src/relay/analysis/well_formed.cc +++ b/src/relay/analysis/well_formed.cc @@ -59,9 +59,9 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor { WellFormedChecker* wfc; explicit Scope(WellFormedChecker* wfc) : wfc(wfc) { wfc->scope.push_back({{}}); } ~Scope() { - CHECK_GE(wfc->scope.size(), 0); + ICHECK_GE(wfc->scope.size(), 0); for (const Var& v : wfc->scope.back()) { - CHECK_GE(wfc->current_bound.count(v), 0); + ICHECK_GE(wfc->current_bound.count(v), 0); wfc->current_bound.erase(v); } wfc->scope.pop_back(); @@ -73,7 +73,7 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor { Illformed(Diagnostic::Error(v->span) << "the variable " << v->name_hint() << " is bound more than once, this is not valid IR"); } - CHECK_GE(scope.size(), 0); + ICHECK_GE(scope.size(), 0); scope.back().insert(v); current_bound.insert(v); total_bound.insert(v); @@ -120,14 +120,14 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor { } void VisitExpr_(const CallNode* call) final { - CHECK(call->op.defined()); + ICHECK(call->op.defined()); for (auto arg : call->args) { - CHECK(arg.defined()); + ICHECK(arg.defined()); } - // CHECK(call->attrs.defined()); - CHECK(call->type_args.defined()); + // ICHECK(call->attrs.defined()); + ICHECK(call->type_args.defined()); MixedModeVisitor::VisitExpr_(call); } diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc index 64f1253ff9db..ddea5456585b 100644 --- a/src/relay/backend/build_module.cc +++ b/src/relay/backend/build_module.cc @@ -124,7 +124,7 @@ class RelayBuildModule : public runtime::ModuleNode
{ [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->GetModule(); }); } else if (name == "build") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.num_args, 3); + ICHECK_EQ(args.num_args, 3); this->Build(args[0], args[1], args[2]); }); } else if (name == "list_params") { @@ -150,7 +150,7 @@ class RelayBuildModule : public runtime::ModuleNode { }); } else if (name == "optimize") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.num_args, 2); + ICHECK_EQ(args.num_args, 2); *rv = this->Optimize(args[0], args[1], this->params_); }); } else { @@ -244,7 +244,7 @@ class RelayBuildModule : public runtime::ModuleNode { ICHECK(relay_module.defined()) << "The IRModule must be defined for the Relay compiler."; if (params.size()) { - CHECK(relay_module->ContainGlobalVar("main")) << "Missing the main entry function"; + ICHECK(relay_module->ContainGlobalVar("main")) << "Missing the main entry function"; GlobalVar main_glb_var = relay_module->GetGlobalVar("main"); Function main_func = Downcast(relay_module->Lookup(main_glb_var)); auto new_main = BindParamsByName(main_func, params); @@ -319,7 +319,7 @@ class RelayBuildModule : public runtime::ModuleNode { Optional opt_fallback_dev = pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast(kDLCPU))); auto fallback_dev = opt_fallback_dev.value(); - CHECK_GT(fallback_dev->value, 0U); + ICHECK_GT(fallback_dev->value, 0U); relay_module = RunDeviceAnnotationPass(relay_module, fallback_dev->value); } @@ -335,7 +335,7 @@ class RelayBuildModule : public runtime::ModuleNode { relay_module = transform::Inline()(relay_module); relay_module = transform::InferType()(relay_module); - CHECK(relay_module.defined()); + ICHECK(relay_module.defined()); return relay_module; } @@ -383,7 +383,7 @@ class RelayBuildModule : public runtime::ModuleNode { UpdateHeterogeneousInputs(fallback_device); auto rewrite = transform::RewriteAnnotatedOps(fallback_device); auto updated_module = rewrite(relay_module); - CHECK(updated_module.defined()); + ICHECK(updated_module.defined()); tvm::Map device_map; for (const auto& it : updated_module->functions) { @@ -408,11 +408,11 @@ class RelayBuildModule : public runtime::ModuleNode { break; } for (auto kv : annotation_map) { - CHECK_EQ(kv.second->value, dev_type) << "Expressions in the function are " - << "annotated with various device types," - << "but not device copy operators " - << "found. Please check the " - << "RewriteAnnotation pass."; + ICHECK_EQ(kv.second->value, dev_type) << "Expressions in the function are " + << "annotated with various device types," + << "but not device copy operators " + << "found. 
Please check the " + << "RewriteAnnotation pass."; } targets_.Set(0, CreateDefaultTarget(dev_type)); } diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index d720e94ddc75..556687c453ac 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -79,8 +79,8 @@ Array GetShape(const Array& shape) { const int64_t* pval = tir::as_const_int(val); if (pval != nullptr) { #ifndef TVM_INDEX_DEFAULT_I64 - CHECK_LE(pval[0], std::numeric_limits::max()); - CHECK_GE(pval[0], std::numeric_limits::min()); + ICHECK_LE(pval[0], std::numeric_limits::max()); + ICHECK_GE(pval[0], std::numeric_limits::min()); res.push_back(IntImm(DataType::Int(32), *pval)); #else res.push_back(val); @@ -116,7 +116,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> for (Type field : tuple_type->fields) { const auto* ttype = field.as(); // TODO(@icemelon): Allow recursive tuple - CHECK(ttype != nullptr); + ICHECK(ttype != nullptr); tvm::te::Tensor tensor = tvm::te::placeholder(GetShape(ttype->shape), ttype->dtype); cache_node->inputs.push_back(tensor); inputs.push_back(tensor); @@ -135,7 +135,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> candidate_name = truncated_name.str(); } cache_node->func_name = candidate_name; - CHECK(anchor_op_.defined()); + ICHECK(anchor_op_.defined()); // Fusion over tupled results may leave identity relationships // between inputs and outputs, and those should not be scheduled. // Hence schedule only non PlaceholderOp outputs. @@ -148,7 +148,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> te::Schedule schedule; // No need to register schedule for device copy op. if (anchor_attrs_.as() == nullptr) { - CHECK(anchor_implementation_.defined()); + ICHECK(anchor_implementation_.defined()); schedule = anchor_implementation_.Schedule(anchor_attrs_, tensor_outs, target_); for (const auto& scalar : scalars_) { if (schedule->Contain(scalar)) { @@ -167,7 +167,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const ConstantNode* op) final { using tir::make_const; - CHECK(op->is_scalar()); + ICHECK(op->is_scalar()); void* data = op->data->data; DataType dtype = DataType(op->data->dtype); auto value = te::compute( @@ -196,7 +196,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const CallNode* call_node) final { static auto fpattern = Op::GetAttrMap("TOpPattern"); static auto flower_call = tvm::runtime::Registry::Get("relay.backend.lower_call"); - CHECK(flower_call) << "relay.backend.lower_call is not registered."; + ICHECK(flower_call) << "relay.backend.lower_call is not registered."; Array inputs; int count_tuple = 0; @@ -209,10 +209,10 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> } } if (count_tuple) { - CHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input"; + ICHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input"; } - CHECK(call_node->op.as()) << "Primitive function only allows call into primitive ops"; + ICHECK(call_node->op.as()) << "Primitive function only allows call into primitive ops"; Op op = Downcast(call_node->op); Array outputs; @@ -229,7 +229,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> int op_pattern = fpattern[op]; if (op_pattern >= kCommReduce) { - CHECK(!anchor_op_.defined() || anchor_op_pattern_ < kCommReduce) + ICHECK(!anchor_op_.defined() || anchor_op_pattern_ < kCommReduce) << "Two 
complicated op in a primitive function " << " anchor=" << anchor_op_ << " current=" << op; } @@ -241,8 +241,8 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> } if (outputs.size() != 1) { const auto* tuple_type = call_node->checked_type().as(); - CHECK(tuple_type) << "Expect output to be a tuple type"; - CHECK_EQ(tuple_type->fields.size(), outputs.size()); + ICHECK(tuple_type) << "Expect output to be a tuple type"; + ICHECK_EQ(tuple_type->fields.size(), outputs.size()); } // Set the name to `__copy`. It will be detected in graph runtime to perform // data copy across devices. @@ -262,7 +262,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const LetNode* op) final { Array val = VisitExpr(op->value); - CHECK(!memo_.count(op->var)); + ICHECK(!memo_.count(op->var)); memo_[op->var] = val; return VisitExpr(op->body); } @@ -270,9 +270,9 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const TupleNode* op) final { Array fields; for (Expr field : op->fields) { - CHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; + ICHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; Array res = VisitExpr(field); - CHECK_EQ(res.size(), 1); + ICHECK_EQ(res.size(), 1); fields.push_back(res[0]); } return fields; @@ -281,9 +281,9 @@ class ScheduleGetter : public backend::MemoizedExprTranslator> Array VisitExpr_(const TupleGetItemNode* op) final { const auto* tuple_type = op->tuple->type_as(); Array tuple = VisitExpr(op->tuple); - CHECK_EQ(tuple_type->fields.size(), tuple.size()); - CHECK_GE(op->index, 0); - CHECK_LT(static_cast(op->index), tuple.size()); + ICHECK_EQ(tuple_type->fields.size(), tuple.size()); + ICHECK_GE(op->index, 0); + ICHECK_LT(static_cast(op->index), tuple.size()); return {tuple[op->index]}; } @@ -332,10 +332,10 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> // flatten tuple of tensor type. 
const auto* tuple_type = param->type_as(); // TODO(@icemelon): Support recursive tuple - CHECK(tuple_type); + ICHECK(tuple_type); for (Type field : tuple_type->fields) { const auto* ttype = field.as(); - CHECK(ttype); + ICHECK(ttype); add_placeholder(ttype); } } @@ -405,7 +405,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> LOG(FATAL) << "Free variable " << var->name_hint(); return {}; } else { - CHECK(data_dependants_.size()); + ICHECK(data_dependants_.size()); bool data_dependant = data_dependants_.back(); if (data_dependant) { param_states_[var] |= kNeedInputData; @@ -419,8 +419,8 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> Array VisitExpr_(const ConstantNode* op) final { using tir::make_const; - CHECK(data_dependants_.size()); - CHECK(op->is_scalar()); + ICHECK(data_dependants_.size()); + ICHECK(op->is_scalar()); bool data_dependant = data_dependants_.back(); if (data_dependant) { void* data = op->data->data; @@ -458,13 +458,13 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> Array VisitExpr_(const CallNode* call_node) final { static auto fshape_func = Op::GetAttrMap("FShapeFunc"); static auto tshape_data_dependant = Op::GetAttrMap("TShapeDataDependant"); - CHECK(call_node->op.as()) << "Primitive function only allows call into primitive ops"; + ICHECK(call_node->op.as()) << "Primitive function only allows call into primitive ops"; Op op = Downcast(call_node->op); - CHECK(data_dependants_.empty() || !data_dependants_.back()) + ICHECK(data_dependants_.empty() || !data_dependants_.back()) << "Error in op fusion: output of the shape func is fed to a " << "data-dependant shape func"; - CHECK_GT(fshape_func.count(op), 0) << "Internal error, cannot find ShapeFunc for " << op->name; - CHECK_GT(tshape_data_dependant.count(op), 0) + ICHECK_GT(fshape_func.count(op), 0) << "Internal error, cannot find ShapeFunc for " << op->name; + ICHECK_GT(tshape_data_dependant.count(op), 0) << "Internal error, cannot find TShapeDataDependant for " << op->name; data_dependants_.push_back(IsDataDependant(call_node)); @@ -480,7 +480,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> } } if (count_tuple) { - CHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input"; + ICHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input"; } // Get output ndims auto ret_type = call_node->checked_type(); @@ -490,10 +490,10 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> } else { auto rtype = ret_type.as(); // TODO(@icemelon): Allow recursive tuple - CHECK(rtype); + ICHECK(rtype); for (size_t i = 0; i < rtype->fields.size(); ++i) { auto ttype = rtype->fields[i].as(); - CHECK(ttype); + ICHECK(ttype); out_ndims.push_back(IntImm(DataType::Int(32), ttype->shape.size())); } } @@ -511,7 +511,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> Array VisitExpr_(const LetNode* op) final { Array val = VisitExpr(op->value); - CHECK(!memo_.count(op->var)); + ICHECK(!memo_.count(op->var)); memo_[op->var] = val; return VisitExpr(op->body); } @@ -519,9 +519,9 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator> Array VisitExpr_(const TupleNode* op) final { Array fields; for (Expr field : op->fields) { - CHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; + ICHECK(field->checked_type().as()) << "Only allow Tuple of Tensor"; Array res = VisitExpr(field); - CHECK_EQ(res.size(), 1); + ICHECK_EQ(res.size(), 1); fields.push_back(res[0]); } return fields; @@ 
-579,34 +579,34 @@ class CompileEngineImpl : public CompileEngineNode { std::vector cached_ext_funcs; for (const auto& it : cache_) { auto src_func = it.first->source_func; - CHECK(src_func.defined()); + ICHECK(src_func.defined()); if (src_func->GetAttr(attr::kCompiler).defined()) { auto code_gen = src_func->GetAttr(attr::kCompiler); - CHECK(code_gen.defined()) << "No external codegen is set"; + ICHECK(code_gen.defined()) << "No external codegen is set"; std::string code_gen_name = code_gen.value(); cached_ext_funcs.push_back(it.first); auto symbol_name = src_func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(symbol_name.defined()) << "No external symbol is set for:\n" - << AsText(src_func, false); + ICHECK(symbol_name.defined()) << "No external symbol is set for:\n" + << AsText(src_func, false); std::string sn = symbol_name.value(); if (cached_symbol.count(sn)) { cached_symbol[sn] = code_gen_name; } else { - CHECK_NE(sn, code_gen_name) + ICHECK_NE(sn, code_gen_name) << "Found duplicated symbol: " << sn << " for: " << code_gen_name; } std::string ext_name = "relay.ext." + code_gen_name; auto pf = tvm::runtime::Registry::Get(ext_name); - CHECK(pf) << "Failed to find the codegen tool for " << ext_name << "\n"; + ICHECK(pf) << "Failed to find the codegen tool for " << ext_name << "\n"; // No need to keep compiler attribute at this point, functions have been // extracted for specific codegen. src_func = WithAttr(std::move(src_func), attr::kCompiler, NullValue()); runtime::Module ext_mod = (*pf)(src_func); - CHECK(ext_mod.defined()) << "No external runtime is generated."; + ICHECK(ext_mod.defined()) << "No external runtime is generated."; ret.push_back(ext_mod); } } @@ -661,7 +661,7 @@ class CompileEngineImpl : public CompileEngineNode { if (key->source_func->GetAttr(attr::kCompiler).defined()) { auto cache_node = make_object(); const auto name_node = key->source_func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(name_node.defined()) << "External function has not been attached a name yet."; + ICHECK(name_node.defined()) << "External function has not been attached a name yet."; cache_node->func_name = std::string(name_node.value()); cache_node->target = Target("ext_dev"); cache_node->funcs->Add(GlobalVar(cache_node->func_name), key->source_func); @@ -671,7 +671,7 @@ class CompileEngineImpl : public CompileEngineNode { // Enforce use the target. With target_scope(key->target); - CHECK(!value->cached_func.defined()); + ICHECK(!value->cached_func.defined()); auto cfunc = CreateSchedule(key->source_func, key->target); auto cache_node = make_object(*(cfunc.operator->())); @@ -720,7 +720,7 @@ class CompileEngineImpl : public CompileEngineNode { // Enforce use the target. 
With target_scope(key->target); - CHECK(!value->cached_func.defined()); + ICHECK(!value->cached_func.defined()); auto spair = MakeShapeFunc().Create(key->source_func); auto cache_node = make_object(*(spair.second.operator->())); cache_node->func_name = GetUniqueName(cache_node->func_name); diff --git a/src/relay/backend/compile_engine.h b/src/relay/backend/compile_engine.h index 95166c74f891..55822917b6b7 100644 --- a/src/relay/backend/compile_engine.h +++ b/src/relay/backend/compile_engine.h @@ -154,7 +154,7 @@ class CCacheKey : public ObjectRef { const CCacheKeyNode* operator->() const { return static_cast(get()); } // comparator inline bool operator==(const CCacheKey& other) const { - CHECK(defined() && other.defined()); + ICHECK(defined() && other.defined()); return (*this)->Equal(other.operator->()); } using ContainerType = CCacheKeyNode; @@ -272,7 +272,7 @@ namespace std { template <> struct hash<::tvm::relay::CCacheKey> { size_t operator()(const ::tvm::relay::CCacheKey& key) const { - CHECK(key.defined()); + ICHECK(key.defined()); return key->Hash(); } }; diff --git a/src/relay/backend/contrib/arm_compute_lib/codegen.cc b/src/relay/backend/contrib/arm_compute_lib/codegen.cc index 087c895f4614..a963242f82d5 100644 --- a/src/relay/backend/contrib/arm_compute_lib/codegen.cc +++ b/src/relay/backend/contrib/arm_compute_lib/codegen.cc @@ -87,7 +87,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { } auto fn = cn->op.as(); auto comp = fn->GetAttr(attr::kComposite); - CHECK(comp.defined()) << "Arm Compute Library JSON runtime only supports composite functions."; + ICHECK(comp.defined()) << "Arm Compute Library JSON runtime only supports composite functions."; const std::string name = comp.value(); std::shared_ptr json_node; if (name == "arm_compute_lib.conv2d" || name == "arm_compute_lib.qnn_conv2d") { @@ -114,7 +114,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { static CompositeConvNode UnpackCompositeConvolution(const CallNode* cn) { CompositeConvNode nodes{}; const auto* fn = cn->op.as(); - CHECK(fn); + ICHECK(fn); // Traverse composite convolution function from child to parent const auto* current_call = fn->body.as(); @@ -132,9 +132,9 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { } // Enforce a convolution node exists at this point during traversal if (nodes.requantize) { - CHECK(backend::IsOp(current_call, "qnn.conv2d")); + ICHECK(backend::IsOp(current_call, "qnn.conv2d")); } else { - CHECK(backend::IsOp(current_call, "nn.conv2d")); + ICHECK(backend::IsOp(current_call, "nn.conv2d")); } nodes.conv = current_call; if (!current_call->args.empty() && current_call->args[0]->IsInstance()) { @@ -157,8 +157,8 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { std::string name = "nn.conv2d"; const auto* conv_attr = nodes.conv->attrs.as(); - CHECK(conv_attr); - CHECK(conv_attr->kernel_layout == "OHWI") + ICHECK(conv_attr); + ICHECK(conv_attr->kernel_layout == "OHWI") << "Kernel layout must be OHWI, has the module been pre-processed correctly?"; // Inputs must be added in the same order they appear in the relay graph. @@ -186,7 +186,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { // Override attributes if (nodes.pad) { const auto* pad_attr = nodes.pad->attrs.as(); - CHECK(pad_attr); + ICHECK(pad_attr); auto p = pad_attr->pad_width; // Convert to TVM layout for now, conversion to ACL layout takes place in runtime. 
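The compile_engine.h hunks above touch the two pieces that let CCacheKey serve as an unordered_map key: operator== and the std::hash specialization, with the ICHECKs guarding that both sides are defined before Equal()/Hash() run. As a generic illustration of that pattern (names hypothetical; TVM's real CCacheKeyNode::Hash() is more involved):

#include <functional>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for relay::CCacheKey: a function/target pair used as a map key.
struct CacheKey {
  std::string func_name;
  std::string target;
  bool operator==(const CacheKey& other) const {
    return func_name == other.func_name && target == other.target;
  }
};

namespace std {
template <>
struct hash<CacheKey> {
  size_t operator()(const CacheKey& key) const {
    // Simple combiner for illustration only.
    return hash<string>()(key.func_name) ^ (hash<string>()(key.target) << 1);
  }
};
}  // namespace std

int main() {
  std::unordered_map<CacheKey, int> cache;
  cache[CacheKey{"conv2d", "llvm"}] = 1;
  return cache.count(CacheKey{"conv2d", "llvm"}) == 1 ? 0 : 1;
}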
// Standard convolution pad layout for TVM: top, left, bottom, right. @@ -216,7 +216,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { static CompositeDenseNode UnpackCompositeDense(const CallNode* cn) { CompositeDenseNode nodes{}; const auto* fn = cn->op.as(); - CHECK(fn); + ICHECK(fn); // Traverse composite dense function from child to parent const auto* current_call = fn->body.as(); @@ -230,9 +230,9 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { } // Enforce a dense node exists at this point during traversal if (nodes.requantize) { - CHECK(backend::IsOp(current_call, "qnn.dense")); + ICHECK(backend::IsOp(current_call, "qnn.dense")); } else { - CHECK(backend::IsOp(current_call, "nn.dense")); + ICHECK(backend::IsOp(current_call, "nn.dense")); } nodes.dense = current_call; return nodes; @@ -282,13 +282,13 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { */ std::shared_ptr CreateCompositeAvgPool2DJSONNode(const CallNode* cn) { const auto* fn = cn->op.as(); - CHECK(fn); + ICHECK(fn); const auto* cast = fn->body.as(); - CHECK(cast); + ICHECK(cast); const auto* avg_pool = cast->args[0].as(); - CHECK(avg_pool); + ICHECK(avg_pool); const auto* avg_pool_op = avg_pool->op.as(); - CHECK(avg_pool_op); + ICHECK(avg_pool_op); const std::string name = avg_pool_op->name; std::vector inputs; @@ -310,16 +310,16 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer { std::shared_ptr CreateCompositeL2Pool2DJSONNode(const CallNode* cn) { const std::string name = "nn.l2_pool2d"; const auto* fn = cn->op.as(); - CHECK(fn); + ICHECK(fn); const auto* sqrt = fn->body.as(); - CHECK(sqrt); + ICHECK(sqrt); const auto* avg_pool = sqrt->args[0].as(); - CHECK(avg_pool); + ICHECK(avg_pool); const auto* pow = avg_pool->args[0].as(); - CHECK(pow); + ICHECK(pow); const auto* exponent = pow->args[1].as(); - CHECK(exponent); - CHECK_EQ(*static_cast(exponent->data->data), 2) << "Exponent must be 2 for L2 pooling"; + ICHECK(exponent); + ICHECK_EQ(*static_cast(exponent->data->data), 2) << "Exponent must be 2 for L2 pooling"; std::vector inputs; inputs.push_back(VisitExpr(cn->args[0])[0]); @@ -363,7 +363,7 @@ TVM_REGISTER_GLOBAL("relay.ext.arm_compute_lib.optimize").set_body_typed(PreProc * \return A runtime module. 
*/ runtime::Module ACLCompiler(const ObjectRef& ref) { - CHECK(ref->IsInstance()) << "The input ref is expected to be a Relay function."; + ICHECK(ref->IsInstance()) << "The input ref is expected to be a Relay function."; Function func = Downcast(ref); std::string func_name = backend::GetExtSymbol(func); @@ -372,7 +372,7 @@ runtime::Module ACLCompiler(const ObjectRef& ref) { std::string graph_json = serializer.GetJSON(); auto param_names = serializer.GetParams(); const auto* pf = runtime::Registry::Get("runtime.arm_compute_lib_runtime_create"); - CHECK(pf != nullptr) << "Cannot find JSON runtime module to create"; + ICHECK(pf != nullptr) << "Cannot find JSON runtime module to create"; runtime::Module lib = (*pf)(func_name, graph_json, param_names); return lib; } diff --git a/src/relay/backend/contrib/codegen_c/codegen.cc b/src/relay/backend/contrib/codegen_c/codegen.cc index c7b5a8da1fed..935ac16efb23 100644 --- a/src/relay/backend/contrib/codegen_c/codegen.cc +++ b/src/relay/backend/contrib/codegen_c/codegen.cc @@ -61,7 +61,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code std::vector outs; for (auto field : node->fields) { auto res = VisitExpr(field); - CHECK_EQ(res.size(), 1U) << "Do not support tuple nest"; + ICHECK_EQ(res.size(), 1U) << "Do not support tuple nest"; outs.push_back(res[0]); } return outs; @@ -69,7 +69,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code std::vector VisitExpr_(const TupleGetItemNode* op) final { auto res = VisitExpr(op->tuple); - CHECK_GT(res.size(), static_cast(op->index)); + ICHECK_GT(res.size(), static_cast(op->index)); // Only keep the item we want for the child node. // FIXME(@comaniac): The other items should still be required for the primary outputs. @@ -84,7 +84,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code // Get const: static_cast(gcc_0_consts[0]->data) output.name = CreateDataReference(ext_func_id_, const_idx_); const auto* type_node = cn->checked_type().as(); - CHECK(type_node); + ICHECK(type_node); const auto& dtype = GetDtypeString(type_node); // Generate the global variable for needed ndarrays @@ -94,7 +94,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code ext_func_body_.insert(ext_func_body_.begin(), checker); } - CHECK(dtype == "float" || dtype == "int") << "Only float and int are supported for now."; + ICHECK(dtype == "float" || dtype == "int") << "Only float and int are supported for now."; output.dtype = dtype; std::string const_var_name = CreateConstVar(ext_func_id_, const_idx_); @@ -130,7 +130,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code } const auto* type_node = call->checked_type().as(); - CHECK(type_node); + ICHECK(type_node); const auto& dtype = GetDtypeString(type_node); macro_stream << ", " << dtype; @@ -216,7 +216,7 @@ class CodegenC : public MemoizedExprTranslator>, public Code class CSourceCodegen : public CSourceModuleCodegenBase { public: std::pair> GenCFunc(const Function& func) { - CHECK(func.defined()) << "Input error: expect a Relay function."; + ICHECK(func.defined()) << "Input error: expect a Relay function."; // Record the external symbol for runtime lookup.
auto sid = GetExtSymbol(func); @@ -260,7 +260,7 @@ class CSourceCodegen : public CSourceModuleCodegenBase { code_stream_ << operator_macro << "\n\n"; - CHECK(ref->IsInstance()); + ICHECK(ref->IsInstance()); auto res = GenCFunc(Downcast(ref)); std::string code = code_stream_.str(); @@ -269,7 +269,7 @@ class CSourceCodegen : public CSourceModuleCodegenBase { // Create a CSource module const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate"); - CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; + ICHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; return (*pf)(code, "c", sym, variables); } diff --git a/src/relay/backend/contrib/codegen_c/codegen_c.h b/src/relay/backend/contrib/codegen_c/codegen_c.h index 0d395b7977b2..9448b4d0738d 100644 --- a/src/relay/backend/contrib/codegen_c/codegen_c.h +++ b/src/relay/backend/contrib/codegen_c/codegen_c.h @@ -85,7 +85,7 @@ class CodegenCBase { * \brief Exit a scope. */ void ExitScope() { - CHECK_GE(indent_, 2U) << "Wrong ident found."; + ICHECK_GE(indent_, 2U) << "Wrong ident found."; indent_ -= 2; } @@ -262,7 +262,7 @@ class CodegenCBase { */ std::string GetDtypeString(const Var& var) { auto ttype = var->checked_type().as(); - CHECK(ttype) << "Expect TensorTypeNode"; + ICHECK(ttype) << "Expect TensorTypeNode"; return GetDtypeString(ttype); } @@ -297,7 +297,7 @@ class CodegenCBase { */ std::string CreateInitChecker(const std::string& symbol) const { std::ostringstream oss; - oss << "CHECK(!" << symbol + oss << "ICHECK(!" << symbol << "_consts.empty()) << \"C source module hasn't been initialized.\";\n"; return oss.str(); } diff --git a/src/relay/backend/contrib/codegen_json/codegen_json.h b/src/relay/backend/contrib/codegen_json/codegen_json.h index 9ed15a88c72a..859ef8c9bdb2 100644 --- a/src/relay/backend/contrib/codegen_json/codegen_json.h +++ b/src/relay/backend/contrib/codegen_json/codegen_json.h @@ -197,8 +197,8 @@ class JSONSerializer : public MemoizedExprTranslator()) { for (size_t i = 0; i < tuple_type->fields.size(); ++i) { const auto* tensor_type = tuple_type->fields[i].as(); - CHECK(tensor_type) << "Expect TensorType, but received: ." - << tuple_type->fields[i]->GetTypeKey(); + ICHECK(tensor_type) << "Expect TensorType, but received: ." 
+ << tuple_type->fields[i]->GetTypeKey(); ret.push_back(JSONGraphNodeEntry(node_id, i)); shape.emplace_back(GetIntShape(tensor_type->shape)); dtype.emplace_back(DType2String(tensor_type->dtype)); @@ -206,7 +206,7 @@ class JSONSerializer : public MemoizedExprTranslatorSetNumOutput(tuple_type->fields.size()); } else { const auto* tensor_type = checked_type.as(); - CHECK(tensor_type) << "Expect TensorType, but received: " << checked_type->GetTypeKey(); + ICHECK(tensor_type) << "Expect TensorType, but received: " << checked_type->GetTypeKey(); shape.emplace_back(GetIntShape(tensor_type->shape)); dtype.emplace_back(DType2String(tensor_type->dtype)); ret.push_back(JSONGraphNodeEntry(node_id, 0)); @@ -228,7 +228,7 @@ class JSONSerializer : public MemoizedExprTranslator(call_attr)); } else if (const auto* fn = cn->op.as()) { auto pattern = fn->GetAttr(attr::kPartitionedFromPattern); - CHECK(pattern.defined()); + ICHECK(pattern.defined()); std::vector values; values.push_back(pattern.value()); std::vector attr; @@ -243,7 +243,7 @@ class JSONSerializer : public MemoizedExprTranslator VisitExpr_(const VarNode* vn) { - CHECK(memo_.count(GetRef(vn))); + ICHECK(memo_.count(GetRef(vn))); return memo_[GetRef(vn)]; } @@ -270,7 +270,7 @@ class JSONSerializer : public MemoizedExprTranslatorname; } else if (const auto* fn = cn->op.as()) { auto comp = fn->GetAttr(attr::kComposite); - CHECK(comp.defined()) << "JSON runtime only supports composite functions."; + ICHECK(comp.defined()) << "JSON runtime only supports composite functions."; name = comp.value(); } else { LOG(FATAL) << "JSON runtime does not support calls to " << cn->op->GetTypeKey(); @@ -289,7 +289,7 @@ class JSONSerializer : public MemoizedExprTranslator VisitExpr_(const LetNode* ln) { - CHECK_EQ(memo_.count(ln->var), 0); + ICHECK_EQ(memo_.count(ln->var), 0); memo_[ln->var] = VisitExpr(ln->value); return VisitExpr(ln->body); } @@ -300,7 +300,7 @@ class JSONSerializer : public MemoizedExprTranslator VisitExpr_(const FunctionNode* fn) { - CHECK(fn->GetAttr(attr::kComposite).defined()) + ICHECK(fn->GetAttr(attr::kComposite).defined()) << "JSON runtime only supports composite functions"; // FunctionNode should be handled by the caller. return {}; diff --git a/src/relay/backend/contrib/dnnl/codegen.cc b/src/relay/backend/contrib/dnnl/codegen.cc index bec9af0cf83f..bfc5c77d116b 100644 --- a/src/relay/backend/contrib/dnnl/codegen.cc +++ b/src/relay/backend/contrib/dnnl/codegen.cc @@ -57,7 +57,7 @@ inline size_t GetShape1DSize(const Type& type) { std::vector Conv2d(const CallNode* call) { std::vector args; const auto* conv2d_attr = call->attrs.as(); - CHECK(conv2d_attr); + ICHECK(conv2d_attr); auto ishape = GetShape(call->args[0]->checked_type()); auto wshape = GetShape(call->args[1]->checked_type()); @@ -155,7 +155,7 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C std::vector outs; for (auto field : node->fields) { auto res = VisitExpr(field); - CHECK_EQ(res.size(), 1U) << "Do not support tuple nest"; + ICHECK_EQ(res.size(), 1U) << "Do not support tuple nest"; outs.push_back(res[0]); } return outs; @@ -163,7 +163,7 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C std::vector VisitExpr_(const TupleGetItemNode* op) final { auto res = VisitExpr(op->tuple); - CHECK_GT(res.size(), static_cast(op->index)); + ICHECK_GT(res.size(), static_cast(op->index)); // Only keep the item we want for the child node. // FIXME(@comaniac): The other items should still be requried for the primary outputs. 
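One hunk back, in codegen_c.h, note that CreateInitChecker edits a string literal rather than a call site: the ICHECK is emitted into the generated C source. For the hypothetical symbol gcc_0 used in the nearby comments, the emitted line would read:

ICHECK(!gcc_0_consts.empty()) << "C source module hasn't been initialized.";

so generated modules presumably need to be compiled against a header that defines ICHECK as well.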
@@ -190,8 +190,8 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C const_idx_++; const auto* type_node = cn->checked_type().as(); - CHECK(type_node); - CHECK_EQ(GetDtypeString(type_node), "float") << "Only float is supported for now."; + ICHECK(type_node); + ICHECK_EQ(GetDtypeString(type_node), "float") << "Only float is supported for now."; return {output}; } @@ -233,7 +233,7 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C GenerateBodyOutput GenerateOpCall(const CallNode* call) { const auto* op_node = call->op.as(); - CHECK(op_node) << "Expect OpNode, but got " << call->op->GetTypeKey(); + ICHECK(op_node) << "Expect OpNode, but got " << call->op->GetTypeKey(); using ArgFunType = std::function(const CallNode*)>; static const std::map> op_map = { @@ -257,7 +257,7 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C GenerateBodyOutput GenerateCompositeFunctionCall(const FunctionNode* callee, const CallNode* caller) { const auto pattern_name = callee->GetAttr(attr::kComposite); - CHECK(pattern_name.defined()) << "Only functions with composite attribute supported"; + ICHECK(pattern_name.defined()) << "Only functions with composite attribute supported"; if (pattern_name == "dnnl.conv2d_bias_relu") { const auto* conv_call = @@ -283,7 +283,7 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C const std::vector& func_args, const std::vector& attribute_args) { // Make function call with input buffers when visiting arguments - CHECK_GT(func_args.size(), 0); + ICHECK_GT(func_args.size(), 0); std::ostringstream decl_stream; decl_stream << "(" << func_args[0]; for (size_t i = 1; i < func_args.size(); ++i) { @@ -295,11 +295,11 @@ class CodegenDNNL : public MemoizedExprTranslator>, public C if (root_call->checked_type()->IsInstance()) { auto type_node = root_call->checked_type().as(); for (auto field : type_node->fields) { - CHECK(field->IsInstance()); + ICHECK(field->IsInstance()); out_types.push_back(field); } } else if (root_call->checked_type()->IsInstance()) { - CHECK(root_call->checked_type()->IsInstance()); + ICHECK(root_call->checked_type()->IsInstance()); out_types.push_back(root_call->checked_type()); } else { LOG(FATAL) << "Unrecognized type node: " << AsText(root_call->checked_type(), false); @@ -363,7 +363,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase { public: // Create a corresponding DNNL function for the given relay Function. std::pair> GenDNNLFunc(const Function& func) { - CHECK(func.defined()) << "Input error: expect a Relay function."; + ICHECK(func.defined()) << "Input error: expect a Relay function."; // Record the external symbol for runtime lookup. 
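Many of the ICHECKs in these backend translators guard the memo_ table inherited from MemoizedExprTranslator: a let-bound variable must be recorded exactly once before any later use looks it up. A self-contained sketch of that memoization pattern (toy types, not the Relay visitor API):

#include <cassert>
#include <unordered_map>

// Toy expression node standing in for relay::Expr.
struct Expr {
  int value = 0;
  const Expr* child = nullptr;
};

// Each node is translated once; revisits are served from the memo table,
// which is what makes the "already recorded?" checks in the real visitors meaningful.
class MemoizedTranslator {
 public:
  int Visit(const Expr* e) {
    auto it = memo_.find(e);
    if (it != memo_.end()) return it->second;
    int result = e->child ? Visit(e->child) + e->value : e->value;
    memo_[e] = result;
    return result;
  }

 private:
  std::unordered_map<const Expr*, int> memo_;
};

int main() {
  Expr leaf{1};
  Expr root{2, &leaf};
  MemoizedTranslator t;
  assert(t.Visit(&root) == 3);
  assert(t.Visit(&root) == 3);  // second call served from memo_
  return 0;
}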
auto sid = GetExtSymbol(func); @@ -404,7 +404,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase { code_stream_ << "using namespace tvm::runtime::contrib;\n"; code_stream_ << "\n"; - CHECK(ref->IsInstance()); + ICHECK(ref->IsInstance()); auto res = GenDNNLFunc(Downcast(ref)); std::string code = code_stream_.str(); String sym = std::get<0>(res); @@ -412,7 +412,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase { // Create a CSource module const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate"); - CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; + ICHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; return (*pf)(code, "c", sym, variables); } @@ -441,14 +441,14 @@ class DNNLJSONSerializer : public backend::contrib::JSONSerializer { name = op_node->name; } else if (const auto* fn = cn->op.as()) { auto comp = fn->GetAttr(attr::kComposite); - CHECK(comp.defined()) << "DNNL JSON runtime only supports composite functions."; + ICHECK(comp.defined()) << "DNNL JSON runtime only supports composite functions."; name = comp.value(); if (name == "dnnl.conv2d_bias_relu") { call = GetRootCall(fn->body.as(), 2, {"nn.conv2d", "add", "nn.relu"}); } else if (name == "dnnl.conv2d_relu") { call = GetRootCall(fn->body.as(), 1, {"nn.conv2d", "nn.relu"}); - CHECK(call->op.as()) << "Not op node"; + ICHECK(call->op.as()) << "Not op node"; } else { LOG(FATAL) << "Unrecognized DNNL pattern: " << name; } @@ -476,7 +476,7 @@ class DNNLJSONSerializer : public backend::contrib::JSONSerializer { */ runtime::Module DNNLCompiler(const ObjectRef& ref) { #ifdef USE_JSON_RUNTIME - CHECK(ref->IsInstance()); + ICHECK(ref->IsInstance()); auto func = Downcast(ref); auto func_name = GetExtSymbol(func); DNNLJSONSerializer serializer(func_name, func); @@ -485,7 +485,7 @@ runtime::Module DNNLCompiler(const ObjectRef& ref) { auto params = serializer.GetParams(); const auto* pf = runtime::Registry::Get("runtime.DNNLJSONRuntimeCreate"); - CHECK(pf != nullptr) << "Cannot find JSON runtime module to create"; + ICHECK(pf != nullptr) << "Cannot find JSON runtime module to create"; auto mod = (*pf)(func_name, graph_json, params); return mod; #else diff --git a/src/relay/backend/contrib/ethosn/codegen.cc b/src/relay/backend/contrib/ethosn/codegen.cc index dd92c6bfe723..3097a300a0d9 100644 --- a/src/relay/backend/contrib/ethosn/codegen.cc +++ b/src/relay/backend/contrib/ethosn/codegen.cc @@ -43,7 +43,7 @@ sl::TensorInfo GetTensorInfo(std::map> tensor_ bool IsEthosnOp(const Call& call, const std::string& op_name) { if (call->op->IsInstance()) { Op op = Downcast(call->op); - CHECK(op.defined()); + ICHECK(op.defined()); return op == Op::Get(op_name); } else { return false; @@ -53,7 +53,7 @@ bool IsEthosnOp(const Call& call, const std::string& op_name) { bool IsEthosnFunc(const Call& call, const std::string& op_name) { if (call->op->IsInstance()) { Function func = Downcast(call->op); - CHECK(func.defined()); + ICHECK(func.defined()); auto name_node = func->GetAttr(attr::kComposite); return name_node.value() == op_name; } @@ -62,7 +62,7 @@ bool IsEthosnFunc(const Call& call, const std::string& op_name) { std::map> InferTensorsVisitor::Infer(const Expr& expr) { tensor_table_.clear(); - CHECK(expr->checked_type().defined()); + ICHECK(expr->checked_type().defined()); size_t output_size = 1; if (auto tuple = expr->checked_type().as()) { output_size = tuple->fields.size(); @@ -162,7 +162,7 @@ void InferTensorsVisitor::VisitExpr_(const 
CallNode* cn) { void InferTensorsVisitor::VisitExpr_(const TupleNode* tn) { auto tuple = GetRef(tn); - CHECK(tensor_table_.find(tuple) != tensor_table_.end()); + ICHECK(tensor_table_.find(tuple) != tensor_table_.end()); for (size_t i = 0; i < tn->fields.size(); i++) { tensor_table_[tn->fields[i]] = {tensor_table_[tuple][i]}; } @@ -176,7 +176,7 @@ void InferTensorsVisitor::VisitExpr_(const TupleGetItemNode* tgn) { // Don't assume it must be targeting a TupleNode // Vars and calls can still have TupleType auto tg = GetRef(tgn); - CHECK(tensor_table_.find(tg) != tensor_table_.end()); + ICHECK(tensor_table_.find(tg) != tensor_table_.end()); auto tuple = tg->tuple; auto type = tuple->checked_type().as(); int index = tg->index; @@ -517,7 +517,7 @@ runtime::Module EthosnCompiler::CreateRuntimeModule(const ObjectRef& ref) { IRModule mod; Function func = Downcast(ref); auto name_node = func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(name_node.defined()) << "Failed to retrieved external symbol."; + ICHECK(name_node.defined()) << "Failed to retrieved external symbol."; GlobalVar gvar = GlobalVar(name_node.value()); mod->Add(gvar, func); Function mod_func = Downcast(mod->functions.at(gvar)); @@ -539,7 +539,7 @@ runtime::ethosn::OrderedCompiledNetwork EthosnCompiler::CompileEthosnFunc(const // Finally compile the network std::vector> compiled_networks = sl::Compile(*network_with_ids.network, options); - CHECK_GE(compiled_networks.size(), 1) << "Ethos-N compiler failed to compile network"; + ICHECK_GE(compiled_networks.size(), 1) << "Ethos-N compiler failed to compile network"; auto compiled_network = std::move(compiled_networks[0]); // Determine the order that the inputs/outputs are in and how that corresponds to the // order that the TVM runtime will expect them in diff --git a/src/relay/backend/contrib/tensorrt/codegen.cc b/src/relay/backend/contrib/tensorrt/codegen.cc index f692da3f31ac..26f674dcd7b5 100644 --- a/src/relay/backend/contrib/tensorrt/codegen.cc +++ b/src/relay/backend/contrib/tensorrt/codegen.cc @@ -109,7 +109,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer { void SetPadNodeAttribute(std::shared_ptr node, const CallNode* cn) { const auto* pad_attr = cn->attrs.as(); - CHECK(pad_attr); + ICHECK(pad_attr); auto p = pad_attr->pad_width; const int dim_h = (p.size() == 5) ? 3 : 2; const int dim_w = (p.size() == 5) ? 4 : 3; @@ -124,7 +124,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer { void SetStridedSliceNodeAttribute(std::shared_ptr node, const CallNode* cn) { const auto* attrs = cn->attrs.as(); - CHECK(attrs && attrs->begin && attrs->end && attrs->strides) + ICHECK(attrs && attrs->begin && attrs->end && attrs->strides) << "StridedSlice must have static begin, end, and strides."; const bool default_strides = !attrs->strides.value().defined() || attrs->strides.value().size() == 0; @@ -145,10 +145,10 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer { !attrs->strides.value()[i].defined()) ? 
1 : attrs->strides.value()[i].as()->value; - CHECK_GT(stride_value, 0); + ICHECK_GT(stride_value, 0); const int size_value = (end_value - begin_value + stride_value - 1) / stride_value; - CHECK_GE(begin_value, 0); - CHECK_GT(size_value, 0); + ICHECK_GE(begin_value, 0); + ICHECK_GT(size_value, 0); start.push_back(std::to_string(begin_value)); size.push_back(std::to_string(size_value)); strides.push_back(std::to_string(stride_value)); @@ -168,7 +168,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer { if (!cfg.defined()) { cfg = AttrsWithDefaultValues(); } - CHECK_EQ(cfg.value()->tensorrt_version.size(), 3); + ICHECK_EQ(cfg.value()->tensorrt_version.size(), 3); std::vector tensorrt_version = {std::to_string(cfg.value()->tensorrt_version[0]), std::to_string(cfg.value()->tensorrt_version[1]), std::to_string(cfg.value()->tensorrt_version[2])}; @@ -190,7 +190,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer { * \return A runtime module. */ runtime::Module TensorRTCompiler(const ObjectRef& ref) { - CHECK(ref->IsInstance()) << "The input ref is expected to be a Relay function."; + ICHECK(ref->IsInstance()) << "The input ref is expected to be a Relay function."; Function func = Downcast(ref); std::string func_name = backend::GetExtSymbol(func); @@ -199,7 +199,7 @@ runtime::Module TensorRTCompiler(const ObjectRef& ref) { std::string graph_json = serializer.GetJSON(); auto param_names = serializer.GetParams(); const auto* pf = runtime::Registry::Get("runtime.tensorrt_runtime_create"); - CHECK(pf != nullptr) << "Cannot find TensorRT runtime module create function."; + ICHECK(pf != nullptr) << "Cannot find TensorRT runtime module create function."; runtime::Module lib = (*pf)(func_name, graph_json, param_names); return lib; } diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc index 2b08f45b2582..bf58c8d5be41 100644 --- a/src/relay/backend/graph_plan_memory.cc +++ b/src/relay/backend/graph_plan_memory.cc @@ -83,7 +83,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor { std::vector fields; for (Expr field : op->fields) { auto tok = GetToken(field); - CHECK_EQ(tok.size(), 1U); + ICHECK_EQ(tok.size(), 1U); fields.push_back(tok[0]); } token_map_[op] = fields; @@ -91,7 +91,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor { void VisitExpr_(const TupleGetItemNode* op) final { const auto& tok = GetToken(op->tuple); - CHECK_LT(static_cast(op->index), tok.size()); + ICHECK_LT(static_cast(op->index), tok.size()); token_map_[op] = {tok[op->index]}; } @@ -115,7 +115,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor { const std::vector& GetToken(const Expr& expr) { this->VisitExpr(expr); auto it = token_map_.find(expr.operator->()); - CHECK(it != token_map_.end()); + ICHECK(it != token_map_.end()); return it->second; } /*! @@ -142,14 +142,14 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor { using StorageAllocaBaseVisitor::VisitExpr_; void CreateToken(const ExprNode* op, bool can_realloc) final { - CHECK(!token_map_.count(op)); + ICHECK(!token_map_.count(op)); std::vector tokens; int device_type = node_device_map_.count(GetRef(op)) ? 
node_device_map_[GetRef(op)]->value : 0; if (const auto* tuple_type = op->checked_type().as()) { for (Type t : tuple_type->fields) { const auto* ttype = t.as(); - CHECK(ttype); + ICHECK(ttype); StorageToken* token = arena_->make(); token->ttype = ttype; token->device_type = device_type; @@ -157,7 +157,7 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor { } } else { const auto* ttype = op->checked_type().as(); - CHECK(ttype); + ICHECK(ttype); StorageToken* token = arena_->make(); token->ttype = ttype; token->device_type = device_type; @@ -233,9 +233,9 @@ class StorageAllocator : public StorageAllocaBaseVisitor { using StorageAllocaBaseVisitor::VisitExpr_; // override create token by getting token as prototype requirements. void CreateToken(const ExprNode* op, bool can_realloc) final { - CHECK(!token_map_.count(op)); + ICHECK(!token_map_.count(op)); auto it = prototype_.find(op); - CHECK(it != prototype_.end()); + ICHECK(it != prototype_.end()); std::vector tokens; for (StorageToken* tok : it->second) { if (can_realloc) { @@ -286,12 +286,12 @@ class StorageAllocator : public StorageAllocaBaseVisitor { */ size_t GetMemorySize(StorageToken* prototype) { const TensorTypeNode* ttype = prototype->ttype; - CHECK(ttype != nullptr); + ICHECK(ttype != nullptr); size_t size = 1; for (IndexExpr dim : ttype->shape) { const int64_t* pval = tir::as_const_int(dim); - CHECK(pval != nullptr) << "Cannot allocate memory for symbolic tensor shape " << ttype->shape; - CHECK_GE(*pval, 0) << "Cannot allocate memory for tensor with negative shape " << *pval; + ICHECK(pval != nullptr) << "Cannot allocate memory for symbolic tensor shape " << ttype->shape; + ICHECK_GE(*pval, 0) << "Cannot allocate memory for tensor with negative shape " << *pval; size *= static_cast(pval[0]); } size *= DivRoundUp(ttype->dtype.bits() * ttype->dtype.lanes(), 8); @@ -316,7 +316,7 @@ class StorageAllocator : public StorageAllocaBaseVisitor { for (auto it = mid; it != end; ++it) { StorageToken* tok = it->second; if (tok->device_type != prototype->device_type) continue; - CHECK_EQ(tok->ref_counter, 0); + ICHECK_EQ(tok->ref_counter, 0); // Use exact matching strategy tok->max_bytes = std::max(size, tok->max_bytes); tok->ref_counter = prototype->ref_counter; @@ -329,7 +329,7 @@ class StorageAllocator : public StorageAllocaBaseVisitor { --it; StorageToken* tok = it->second; if (tok->device_type != prototype->device_type) continue; - CHECK_EQ(tok->ref_counter, 0); + ICHECK_EQ(tok->ref_counter, 0); // Use exact matching strategy tok->max_bytes = std::max(size, tok->max_bytes); tok->ref_counter = prototype->ref_counter; @@ -356,8 +356,8 @@ class StorageAllocator : public StorageAllocaBaseVisitor { * \param tok The token to be released.
*/ void CheckForRelease(StorageToken* tok) { - CHECK_GE(tok->storage_id, 0); - CHECK_GE(tok->ref_counter, 0); + ICHECK_GE(tok->storage_id, 0); + ICHECK_GE(tok->ref_counter, 0); if (tok->ref_counter == 0) { free_.insert({tok->max_bytes, tok}); } diff --git a/src/relay/backend/graph_runtime_codegen.cc b/src/relay/backend/graph_runtime_codegen.cc index acc99c51b69b..7b71e34b777b 100644 --- a/src/relay/backend/graph_runtime_codegen.cc +++ b/src/relay/backend/graph_runtime_codegen.cc @@ -243,9 +243,9 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator AddNode(GraphObjectPtr node, Expr expr) { auto checked_type = expr->checked_type(); size_t count = storage_device_map_.count(expr); - CHECK_GT(count, 0) << "Expr does not exist in the storage plan"; + ICHECK_GT(count, 0) << "Expr does not exist in the storage plan"; auto storage_device_info = storage_device_map_[expr]; - CHECK_EQ(storage_device_info.size(), 2); + ICHECK_EQ(storage_device_info.size(), 2); // storage std::vector storage_info; for (auto& v : storage_device_info[0]) { @@ -282,7 +282,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslatorGetTypeKey() << " not supported"; } } - CHECK_EQ(node->Type(), kGraphOpNode); + ICHECK_EQ(node->Type(), kGraphOpNode); auto op_nd = std::dynamic_pointer_cast(node); op_nd->attrs_["shape"] = shape; op_nd->attrs_["dtype"] = dtype; @@ -367,7 +367,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslatorfunc_name, ext_func->func_name); } - CHECK_GE(storage_device_map_.count(expr), 0); + ICHECK_GE(storage_device_map_.count(expr), 0); auto& device_type = storage_device_map_[expr][1]; auto call_dev_type = device_type[0]->value; // Normal Relay Function @@ -410,7 +410,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator VisitExpr_(const LetNode* op) override { - CHECK_EQ(var_map_.count(op->var.get()), 0); + ICHECK_EQ(var_map_.count(op->var.get()), 0); var_map_[op->var.get()] = VisitExpr(op->value); return VisitExpr(op->body); } @@ -431,7 +431,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator VisitExpr_(const FunctionNode* op) override { - CHECK(op->GetAttr(attr::kCompiler).defined()) + ICHECK(op->GetAttr(attr::kCompiler).defined()) << "Only functions supported by custom codegen"; return {}; } @@ -479,7 +479,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator>(node->attrs_["storage_id"]); const auto& dtype_vec = dmlc::get>(node->attrs_["dtype"]); - CHECK_EQ(node->num_outputs_, shape_vec.size()); + ICHECK_EQ(node->num_outputs_, shape_vec.size()); num_entry += node->num_outputs_; shapes.insert(shapes.end(), shape_vec.begin(), shape_vec.end()); @@ -556,14 +556,14 @@ class GraphRuntimeCodegenModule : public runtime::ModuleNode { virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { if (name == "init") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.num_args, 2) << "The expected arguments are: " - << "runtime::Module mod and Map targets"; + ICHECK_EQ(args.num_args, 2) << "The expected arguments are: " + << "runtime::Module mod and Map targets"; void* mod = args[0]; Map tmp = args[1]; TargetsMap targets; for (const auto& it : tmp) { auto dev_type = it.first.as(); - CHECK(dev_type); + ICHECK(dev_type); targets[dev_type->value] = it.second; } codegen_ = @@ -588,7 +588,7 @@ class GraphRuntimeCodegenModule : public runtime::ModuleNode { } else if (name == "get_param_by_name") { return PackedFunc([sptr_to_self, this](TVMArgs args,
TVMRetValue* rv) { String key = args[0]; - CHECK_GT(this->output_.params.count(key), 0); + ICHECK_GT(this->output_.params.count(key), 0); *rv = this->output_.params[key]; }); } else if (name == "get_irmodule") { diff --git a/src/relay/backend/interpreter.cc b/src/relay/backend/interpreter.cc index e58c23b76670..993fb1a62787 100644 --- a/src/relay/backend/interpreter.cc +++ b/src/relay/backend/interpreter.cc @@ -54,7 +54,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) inline const PackedFunc& GetPackedFunc(const std::string& name) { const PackedFunc* pf = tvm::runtime::Registry::Get(name); - CHECK(pf != nullptr) << "Cannot find function " << name << " in registry"; + ICHECK(pf != nullptr) << "Cannot find function " << name << " in registry"; return *pf; } @@ -347,12 +347,12 @@ class Interpreter : public ExprFunctor, } } } - CHECK_EQ(arg_counter, cfunc->inputs.size()) << "Shape function input sizes mismatch"; + ICHECK_EQ(arg_counter, cfunc->inputs.size()) << "Shape function input sizes mismatch"; auto fset_shape_output = [&](size_t i, Type val_type) { // TODO(@icemelon): allow recursive tuple const TensorTypeNode* rtype = val_type.as(); - CHECK(rtype != nullptr); + ICHECK(rtype != nullptr); int64_t ndim = rtype->shape.size(); auto arr = NDArray::Empty({ndim}, DataType::Int(64), cpu_ctx); outputs[i] = arr; @@ -371,7 +371,7 @@ class Interpreter : public ExprFunctor, auto tt = Downcast(ret_type); fset_shape_output(0, tt); } - CHECK_EQ(cfunc->outputs.size(), out_cnt) << "Shape function output sizes mismatch"; + ICHECK_EQ(cfunc->outputs.size(), out_cnt) << "Shape function output sizes mismatch"; PackedFunc shape_func; Module m; @@ -428,7 +428,7 @@ class Interpreter : public ExprFunctor, if (const auto* tuple_type = func->body->checked_type().as()) { arg_len += tuple_type->fields.size(); } else { - CHECK(func->body->checked_type().as()) << func->body->checked_type(); + ICHECK(func->body->checked_type().as()) << func->body->checked_type(); arg_len += 1; } std::vector values(arg_len); @@ -439,7 +439,7 @@ class Interpreter : public ExprFunctor, const auto nd_array = Downcast(val); setter(i, nd_array); DLContext arg_ctx = nd_array->ctx; - CHECK(arg_ctx.device_type == context_.device_type && arg_ctx.device_id == context_.device_id) + ICHECK(arg_ctx.device_type == context_.device_type && arg_ctx.device_id == context_.device_id) << "Interpreter expect context to be " << context_ << ", but get " << arg_ctx; }; @@ -461,12 +461,12 @@ class Interpreter : public ExprFunctor, // return type. auto fset_output = [&](size_t i, Type val_type) { const TensorTypeNode* rtype = val_type.as(); - CHECK(rtype != nullptr); + ICHECK(rtype != nullptr); // Allocate output tensor. 
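// (e.g. for a TensorType with shape [2, 3] and dtype float32, the loop below
// collects {2, 3} and an empty 2x3 float32 NDArray is created on the interpreter's context)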
std::vector shape; for (auto dim : rtype->shape) { const auto* ivalue = tir::as_const_int(dim); - CHECK(ivalue) << "expected concrete dimensions"; + ICHECK(ivalue) << "expected concrete dimensions"; shape.push_back(ivalue[0]); } DLDataType dtype = rtype->dtype; @@ -480,14 +480,14 @@ class Interpreter : public ExprFunctor, bool is_dyn = IsDynamic(ret_type); if (is_dyn) { - CHECK(func->HasNonzeroAttr(attr::kPrimitive)); + ICHECK(func->HasNonzeroAttr(attr::kPrimitive)); out_shapes = ComputeDynamicShape(func, args); } PackedFunc packed_func = engine_->JIT(CCacheKey(func, target_)); TVMRetValue rv; if (const TupleTypeNode* rtype = func->body->checked_type().as()) { - CHECK(!is_dyn || out_shapes.size() == rtype->fields.size()); + ICHECK(!is_dyn || out_shapes.size() == rtype->fields.size()); std::vector fields; for (size_t i = 0; i < rtype->fields.size(); ++i) { if (is_dyn) { @@ -503,7 +503,7 @@ class Interpreter : public ExprFunctor, } else { ObjectRef out_tensor; if (is_dyn) { - CHECK_EQ(out_shapes.size(), 1); + ICHECK_EQ(out_shapes.size(), 1); auto sh = out_shapes[0]; auto tt = Downcast(ret_type); out_tensor = fset_output(0, TensorType(sh, tt->dtype)); @@ -526,16 +526,16 @@ class Interpreter : public ExprFunctor, // Allocate a frame with the parameters and free variables. tvm::Map locals; - CHECK_EQ(func->params.size(), args.size()); + ICHECK_EQ(func->params.size(), args.size()); for (size_t i = 0; i < func->params.size(); i++) { - CHECK_EQ(locals.count(func->params[i]), 0); + ICHECK_EQ(locals.count(func->params[i]), 0); locals.Set(func->params[i], args[i]); } // Add the var to value mappings from the Closure's environment. for (auto it = closure->env.begin(); it != closure->env.end(); ++it) { - CHECK_EQ(locals.count((*it).first), 0); + ICHECK_EQ(locals.count((*it).first), 0); locals.Set((*it).first, (*it).second); } @@ -593,9 +593,9 @@ class Interpreter : public ExprFunctor, ObjectRef VisitExpr_(const TupleGetItemNode* op) final { ObjectRef val = Eval(op->tuple); const auto* adt_obj = val.as(); - CHECK(adt_obj) << "internal error: when evaluating TupleGetItem expected an ADT value"; + ICHECK(adt_obj) << "internal error: when evaluating TupleGetItem expected an ADT value"; auto adt = GetRef(adt_obj); - CHECK_LT(static_cast(op->index), adt.size()) << "internal error: index out of bounds"; + ICHECK_LT(static_cast(op->index), adt.size()) << "internal error: index out of bounds"; return adt[op->index]; } @@ -607,7 +607,7 @@ class Interpreter : public ExprFunctor, cpu_ctx.device_type = kDLCPU; cpu_ctx.device_id = 0; NDArray cpu_array = nd_array.CopyTo(cpu_ctx); - CHECK_EQ(DataType(cpu_array->dtype), DataType::Bool()); + ICHECK_EQ(DataType(cpu_array->dtype), DataType::Bool()); // TODO(@jroesch, @MK): Refactor code into helper from DCE.
if (reinterpret_cast(cpu_array->data)[0]) { return Eval(op->true_branch); @@ -656,11 +656,11 @@ class Interpreter : public ExprFunctor, bool VisitPattern_(const PatternConstructorNode* op, const ObjectRef& v) final { const ConstructorValueObj* cvn = v.as(); - CHECK(cvn) << "need to be a constructor for match"; - CHECK_NE(op->constructor->tag, -1); - CHECK_NE(cvn->tag, -1); + ICHECK(cvn) << "need to be a constructor for match"; + ICHECK_NE(op->constructor->tag, -1); + ICHECK_NE(cvn->tag, -1); if (op->constructor->tag == cvn->tag) { - CHECK_EQ(op->patterns.size(), cvn->fields.size()); + ICHECK_EQ(op->patterns.size(), cvn->fields.size()); for (size_t i = 0; i < op->patterns.size(); ++i) { if (!VisitPattern(op->patterns[i], cvn->fields[i])) { return false; @@ -673,7 +673,7 @@ class Interpreter : public ExprFunctor, bool VisitPattern_(const PatternTupleNode* op, const ObjectRef& v) final { auto adt = Downcast(v); - CHECK_EQ(op->patterns.size(), adt.size()); + ICHECK_EQ(op->patterns.size(), adt.size()); for (size_t i = 0; i < op->patterns.size(); ++i) { if (!VisitPattern(op->patterns[i], adt[i])) { return false; @@ -730,7 +730,7 @@ TypedPackedFunc CreateInterpreter(IRModule mod, DLContext conte auto intrp = std::make_shared(mod, context, target); auto packed = [intrp](Expr expr) { auto f = DetectFeature(expr); - CHECK(f.is_subset_of(FeatureSet::All() - fGraph)); + ICHECK(f.is_subset_of(FeatureSet::All() - fGraph)); return intrp->Eval(expr); }; return TypedPackedFunc(packed); diff --git a/src/relay/backend/param_dict.cc b/src/relay/backend/param_dict.cc index ef4b6589bdba..1d7e08abcdde 100644 --- a/src/relay/backend/param_dict.cc +++ b/src/relay/backend/param_dict.cc @@ -37,7 +37,7 @@ namespace relay { using namespace runtime; TVM_REGISTER_GLOBAL("tvm.relay._save_param_dict").set_body([](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.size() % 2, 0u); + ICHECK_EQ(args.size() % 2, 0u); // `args` is in the form "key, value, key, value, ..." 
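// (e.g. a hypothetical call _save_param_dict("fc1.weight", w, "fc1.bias", b) arrives as
// args = ["fc1.weight", w, "fc1.bias", b]: size 4, so num_params below is 2)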
size_t num_params = args.size() / 2; std::vector names; @@ -74,14 +74,14 @@ TVM_REGISTER_GLOBAL("tvm.relay._load_param_dict").set_body([](TVMArgs args, TVMR dmlc::MemoryStringStream memstrm(&bytes); dmlc::Stream* strm = &memstrm; uint64_t header, reserved; - CHECK(strm->Read(&header)) << "Invalid parameters file format"; - CHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; - CHECK(strm->Read(&reserved)) << "Invalid parameters file format"; - CHECK(strm->Read(&names)) << "Invalid parameters file format"; + ICHECK(strm->Read(&header)) << "Invalid parameters file format"; + ICHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; + ICHECK(strm->Read(&reserved)) << "Invalid parameters file format"; + ICHECK(strm->Read(&names)) << "Invalid parameters file format"; uint64_t sz; strm->Read(&sz, sizeof(sz)); size_t size = static_cast(sz); - CHECK(size == names.size()) << "Invalid parameters file format"; + ICHECK(size == names.size()) << "Invalid parameters file format"; tvm::Array ret; for (size_t i = 0; i < size; ++i) { tvm::runtime::NDArray temp; diff --git a/src/relay/backend/utils.h b/src/relay/backend/utils.h index 07f42266b831..3def6359c615 100644 --- a/src/relay/backend/utils.h +++ b/src/relay/backend/utils.h @@ -81,7 +81,7 @@ class MemoizedExprTranslator : public ::tvm::relay::ExprFunctorsecond; @@ -115,7 +115,7 @@ inline const PackedFunc* GetPackedFunc(const std::string& func_name) { template inline const runtime::TypedPackedFunc GetTypedPackedFunc(const std::string& func_name) { auto* pf = GetPackedFunc(func_name); - CHECK(pf != nullptr) << "can not find packed function"; + ICHECK(pf != nullptr) << "can not find packed function"; return runtime::TypedPackedFunc(*pf); } @@ -129,7 +129,7 @@ inline std::vector GetIntShape(const Array& shape) { std::vector ret; for (const auto& dim : shape) { const int64_t* pval = tir::as_const_int(dim); - CHECK(pval) << "Expect integer, but received: " << dim->GetTypeKey(); + ICHECK(pval) << "Expect integer, but received: " << dim->GetTypeKey(); ret.push_back(*pval); } return ret; @@ -192,8 +192,8 @@ inline relay::Function BindParamsByName( } Expr bound_expr = relay::Bind(func, bind_dict); Function ret = Downcast(bound_expr); - CHECK(ret.defined()) << "The returning type is expected to be a Relay Function." - << "\n"; + ICHECK(ret.defined()) << "The returning type is expected to be a Relay Function." 
+ << "\n"; return ret; } @@ -204,11 +204,11 @@ inline relay::Function BindParamsByName( */ inline std::vector GetShape(const Type& type) { const auto* ttype = type.as(); - CHECK(ttype) << "Expect TensorTypeNode"; + ICHECK(ttype) << "Expect TensorTypeNode"; std::vector shape; for (size_t i = 0; i < ttype->shape.size(); ++i) { auto* val = ttype->shape[i].as(); - CHECK(val); + ICHECK(val); shape.push_back(val->value); } return shape; @@ -223,7 +223,7 @@ inline std::vector GetShape(const Type& type) { */ inline bool IsOp(const CallNode* call, const std::string& op_name) { const auto* op_node = call->op.as(); - CHECK(op_node) << "Expects a single op."; + ICHECK(op_node) << "Expects a single op."; Op op = GetRef(op_node); return op == Op::Get(op_name); } @@ -239,14 +239,14 @@ inline bool IsOp(const CallNode* call, const std::string& op_name) { inline const CallNode* GetRootCall(const CallNode* current_call, int depth, const std::vector& expected_op_names) { - CHECK(current_call && depth >= 0 && static_cast(depth) < expected_op_names.size() && - IsOp(current_call, expected_op_names[depth])); + ICHECK(current_call && depth >= 0 && static_cast(depth) < expected_op_names.size() && + IsOp(current_call, expected_op_names[depth])); if (depth == 0) { return current_call; } - CHECK_GT(current_call->args.size(), 0); + ICHECK_GT(current_call->args.size(), 0); const auto* next_call = current_call->args[0].as(); return GetRootCall(next_call, depth - 1, expected_op_names); @@ -260,7 +260,7 @@ inline const CallNode* GetRootCall(const CallNode* current_call, int depth, */ inline std::string GetExtSymbol(const Function& func) { const auto name_node = func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(name_node.defined()) << "Fail to retrieve external symbol."; + ICHECK(name_node.defined()) << "Fail to retrieve external symbol."; return std::string(name_node.value()); } diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc index c3bf80571638..4a7e5eec17bc 100644 --- a/src/relay/backend/vm/compiler.cc +++ b/src/relay/backend/vm/compiler.cc @@ -60,19 +60,19 @@ Pass InlinePrimitives(); Pass ManifestAlloc(Target target_host, vm::TargetsMap targets) { auto f = tvm::runtime::Registry::Get("relay.transform.ManifestAlloc"); - CHECK(f != nullptr) << "unable to load allocation manifestation pass"; + ICHECK(f != nullptr) << "unable to load allocation manifestation pass"; return (*f)(target_host, targets); } Pass MemoryPlan() { auto f = tvm::runtime::Registry::Get("relay.transform.MemoryPlan"); - CHECK(f != nullptr) << "unable to load the memory planning pass"; + ICHECK(f != nullptr) << "unable to load the memory planning pass"; return (*f)(); } Pass LiftConstants() { auto f = tvm::runtime::Registry::Get("relay.transform.LiftConstants"); - CHECK(f != nullptr) << "unable to load the constant lifting pass"; + ICHECK(f != nullptr) << "unable to load the constant lifting pass"; return (*f)(); } @@ -178,7 +178,7 @@ TreeObjectPtr BuildDecisionTreeFromPattern(MatchValuePtr data, Pattern pattern, return TreeBranchNode::Make(cond, then_branch, else_branch); } else { const auto* pt = pattern.as(); - CHECK(pt) << "unhandled case: " << AsText(pattern, false); + ICHECK(pt) << "unhandled case: " << AsText(pattern, false); size_t field_index = 0; for (auto& p : pt->patterns) { auto d = std::make_shared(data, field_index++); @@ -209,10 +209,10 @@ std::vector ToAllocTensorShape(NDArray shape) { if (shape->ndim == 0) { return raw_shape; } - CHECK_EQ(shape->ndim, 1u); - CHECK_EQ(shape->dtype.code, 0U) << "The dtype of 
constant shape must be int32 or int64, but got " - << DLDataType2String(shape->dtype); - CHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32) + ICHECK_EQ(shape->ndim, 1u); + ICHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got " + << DLDataType2String(shape->dtype); + ICHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32) << "The dtype of constant shape must be int32 or int64, but got" << DLDataType2String(shape->dtype); @@ -247,7 +247,7 @@ int GetFallbackDevice() { Optional opt_fallback_dev = pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast(kDLCPU))); auto fallback_dev = opt_fallback_dev.value(); - CHECK_GT(fallback_dev->value, 0U); + ICHECK_GT(fallback_dev->value, 0U); return fallback_dev->value; } @@ -271,7 +271,7 @@ class VMFunctionCompiler : ExprFunctor { // We then assign register num to the free variables for (auto param : func->params) { auto arg_register = NewRegister(); - CHECK_EQ(i, arg_register); + ICHECK_EQ(i, arg_register); var_register_map_.insert({param, arg_register}); params_.push_back(param->name_hint()); ++i; @@ -281,7 +281,7 @@ class VMFunctionCompiler : ExprFunctor { Function inner_func = Downcast(func->body); for (auto param : inner_func->params) { auto arg_register = NewRegister(); - CHECK_EQ(i, arg_register); + ICHECK_EQ(i, arg_register); var_register_map_.insert({param, arg_register}); params_.push_back(param->name_hint()); ++i; @@ -295,10 +295,10 @@ class VMFunctionCompiler : ExprFunctor { std::vector params_device_type; for (const auto& it : func->params) { if (!expr_device_map_.empty()) { - CHECK_GT(expr_device_map_.count(it), 0U); + ICHECK_GT(expr_device_map_.count(it), 0U); params_device_type.push_back(expr_device_map_[it].device_type); } else { - CHECK_EQ(targets_.size(), 1U); + ICHECK_EQ(targets_.size(), 1U); params_device_type.push_back((targets_.begin())->first); } } @@ -311,7 +311,7 @@ class VMFunctionCompiler : ExprFunctor { inline void Emit(const Instruction& instr) { DLOG(INFO) << "VMCompiler::Emit: instr=" << instr; - CHECK((int)instr.op < 100) << "Invalid opcode " << (int)instr.op; + ICHECK((int)instr.op < 100) << "Invalid opcode " << (int)instr.op; switch (instr.op) { case Opcode::AllocADT: case Opcode::AllocTensor: @@ -348,7 +348,7 @@ class VMFunctionCompiler : ExprFunctor { context_->const_device_type.push_back(targets_.begin()->first); } else { auto con = GetRef(const_node); - CHECK_GT(expr_device_map_.count(con), 0U); + ICHECK_GT(expr_device_map_.count(con), 0U); context_->const_device_type.push_back(expr_device_map_[con].device_type); } context_->constants.push_back(const_node->data); @@ -358,7 +358,7 @@ class VMFunctionCompiler : ExprFunctor { void VisitExpr_(const VarNode* var_node) { auto var = GetRef(var_node); auto reg_it = this->var_register_map_.find(var); - CHECK(reg_it != this->var_register_map_.end()); + ICHECK(reg_it != this->var_register_map_.end()); last_register_ = reg_it->second; } @@ -400,7 +400,7 @@ class VMFunctionCompiler : ExprFunctor { auto var = GetRef(gvar); auto func = context_->module->Lookup(var); auto it = context_->global_map.find(var); - CHECK(it != context_->global_map.end()); + ICHECK(it != context_->global_map.end()); // Allocate closure with zero free vars Emit(Instruction::AllocClosure(it->second, 0, {}, NewRegister())); } @@ -458,7 +458,7 @@ class VMFunctionCompiler : ExprFunctor { auto cfunc = engine_->LowerShapeFunc(key); int op_index = -1; // pick the only function inside the context - CHECK_EQ(cfunc->funcs->functions.size(), 1); + 
ICHECK_EQ(cfunc->funcs->functions.size(), 1); auto pfunc = Downcast((*cfunc->funcs->functions.begin()).second); if (context_->seen_funcs.count(pfunc) == 0) { op_index = context_->cached_funcs.size(); @@ -477,7 +477,7 @@ class VMFunctionCompiler : ExprFunctor { for (auto output : outputs) { auto reg = var_register_map_.find(Downcast(output)); - CHECK(reg != var_register_map_.end()) + ICHECK(reg != var_register_map_.end()) << "internal error: all variables should be in the register mapping"; argument_registers.push_back(reg->second); } @@ -489,16 +489,16 @@ class VMFunctionCompiler : ExprFunctor { void EmitInvokeTVMOp(const Function& func, const Expr& inputs, const Expr& outputs) { std::vector argument_registers; - CHECK(func->GetAttr(attr::kPrimitive, 0) != 0) + ICHECK(func->GetAttr(attr::kPrimitive, 0) != 0) << "internal error: invoke_tvm_op requires the first argument to be a relay::Function"; auto input_tuple = inputs.as(); - CHECK(input_tuple) << "internal error: invoke_tvm_op inputs must be a tuple," - << "please file a bug in the memory manifestation pass"; + ICHECK(input_tuple) << "internal error: invoke_tvm_op inputs must be a tuple," + << "please file a bug in the memory manifestation pass"; auto output_tuple = outputs.as(); - CHECK(output_tuple) << "internal error: invoke_tvm_op outputs must be a tuple," - << "please file a bug in the memory manifestation pass"; + ICHECK(output_tuple) << "internal error: invoke_tvm_op outputs must be a tuple," + << "please file a bug in the memory manifestation pass"; for (auto input : input_tuple->fields) { VisitExpr(input); @@ -507,7 +507,7 @@ class VMFunctionCompiler : ExprFunctor { for (auto output : output_tuple->fields) { auto reg = var_register_map_.find(Downcast(output)); - CHECK(reg != var_register_map_.end()) + ICHECK(reg != var_register_map_.end()) << "internal error: all variables should be in the register mapping"; argument_registers.push_back(reg->second); } @@ -520,11 +520,11 @@ class VMFunctionCompiler : ExprFunctor { // Next generate the invoke instruction. if (expr_device_map_.empty()) { // homogeneous execution. - CHECK_EQ(targets_.size(), 1U); + ICHECK_EQ(targets_.size(), 1U); const auto& it = targets_.begin(); target = (*it).second; } else { - CHECK_GT(expr_device_map_.count(func), 0U) + ICHECK_GT(expr_device_map_.count(func), 0U) << "Found not annotated expression, please make sure " "context analysis has been executed"; int dev_type = expr_device_map_[func].device_type; @@ -545,7 +545,7 @@ class VMFunctionCompiler : ExprFunctor { context_->cached_funcs.push_back(cfunc); } else { // TODO(jroesch): support lowered funcs for multiple targets - CHECK_EQ(cfunc->funcs->functions.size(), 1); + ICHECK_EQ(cfunc->funcs->functions.size(), 1); auto pfunc = Downcast((*cfunc->funcs->functions.begin()).second); if (context_->seen_funcs.find(pfunc) == context_->seen_funcs.end()) { op_index = context_->cached_funcs.size(); @@ -571,16 +571,16 @@ class VMFunctionCompiler : ExprFunctor { matcher .Match("vm.invoke_tvm_op", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 3); + ICHECK_EQ(args.size(), 3); EmitInvokeTVMOp(Downcast(args[0]), args[1], args[2]); }) .Match("memory.alloc_tensor", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 3); + ICHECK_EQ(args.size(), 3); // Get the attributes. 
auto alloc_attrs = attrs.as(); - CHECK(alloc_attrs != nullptr) << "must be the alloc tensor attrs"; + ICHECK(alloc_attrs != nullptr) << "must be the alloc tensor attrs"; auto dtype = alloc_attrs->dtype; // The storage will be passed dynamically. @@ -612,22 +612,22 @@ class VMFunctionCompiler : ExprFunctor { .Match("memory.alloc_storage", [this, call_node](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 2); + ICHECK_EQ(args.size(), 2); // Compute the size of the allocation. this->VisitExpr(args[0]); auto size_register = last_register_; - CHECK(args[1].as()); + ICHECK(args[1].as()); NDArray alignment_arr = args[1].as()->data; - CHECK_EQ(alignment_arr->dtype.code, 0U) + ICHECK_EQ(alignment_arr->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got " << DLDataType2String(alignment_arr->dtype); - CHECK_EQ(alignment_arr->dtype.bits, 64U); + ICHECK_EQ(alignment_arr->dtype.bits, 64U); Index alignment = reinterpret_cast(alignment_arr->data)[0]; // Get the dtype hint from the attributes. auto alloc_attrs = attrs.as(); - CHECK(alloc_attrs != nullptr) << "must be the AllocStorage attrs"; + ICHECK(alloc_attrs != nullptr) << "must be the AllocStorage attrs"; auto dtype = alloc_attrs->dtype; Index device_type; @@ -637,7 +637,7 @@ class VMFunctionCompiler : ExprFunctor { auto& kv = *(targets_.begin()); device_type = kv.first; } else { - CHECK_GT(expr_device_map_.count(GetRef(call_node)), 0U) + ICHECK_GT(expr_device_map_.count(GetRef(call_node)), 0U) << " The alloc_storage node is not annotated"; device_type = expr_device_map_[GetRef(call_node)].device_type; } @@ -647,7 +647,7 @@ class VMFunctionCompiler : ExprFunctor { }) .Match("vm.shape_func", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 3); + ICHECK_EQ(args.size(), 3); auto shape_func = Downcast(args[0]); auto inputs = Downcast(args[1]); auto outputs = Downcast(args[2]); @@ -655,11 +655,11 @@ class VMFunctionCompiler : ExprFunctor { }) .Match("vm.shape_of", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 1U); + ICHECK_EQ(args.size(), 1U); // Get the attributes. 
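// (vm.shape_of materializes a runtime tensor's shape as a 1-D tensor, e.g. a (2, 3)
// input yields [2, 3]; the ICHECK below pins the result dtype to int64)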
const auto* shape_of_attrs = attrs.as(); - CHECK(shape_of_attrs) << "Must be the shape_of attrs"; - CHECK_EQ(shape_of_attrs->dtype.bits(), 64) + ICHECK(shape_of_attrs) << "Must be the shape_of attrs"; + ICHECK_EQ(shape_of_attrs->dtype.bits(), 64) << "The dtype of shape of must be int64, but got" << DLDataType2String(shape_of_attrs->dtype); this->VisitExpr(args[0]); @@ -667,7 +667,7 @@ class VMFunctionCompiler : ExprFunctor { }) .Match("vm.reshape_tensor", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 2u); + ICHECK_EQ(args.size(), 2u); this->VisitExpr(args[0]); auto tensor_reg = last_register_; this->VisitExpr(args[1]); @@ -676,12 +676,12 @@ class VMFunctionCompiler : ExprFunctor { }) .Match("device_copy", [this](const Array& args, const Attrs& attrs, const Array& type_arg) { - CHECK_EQ(args.size(), 1U); + ICHECK_EQ(args.size(), 1U); this->VisitExpr(args[0]); auto src_reg = last_register_; auto device_copy_attrs = attrs.as(); - CHECK(device_copy_attrs != nullptr) << "Must be the device copy attrs"; + ICHECK(device_copy_attrs != nullptr) << "Must be the device copy attrs"; Index src_device_type = device_copy_attrs->src_dev_type; Index dst_device_type = device_copy_attrs->dst_dev_type; Emit(Instruction::DeviceCopy(src_reg, src_device_type, dst_device_type, @@ -711,7 +711,7 @@ class VMFunctionCompiler : ExprFunctor { // calling convention. auto global = GetRef(global_node); auto it = context_->global_map.find(global); - CHECK(it != context_->global_map.end()); + ICHECK(it != context_->global_map.end()); DLOG(INFO) << "VisitExpr_: generating invoke for " << global->name_hint << " with func_index=" << it->second; @@ -855,13 +855,13 @@ class VMFunctionCompiler : ExprFunctor { PackedFunc VMCompiler::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { if (name == "lower") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.num_args, 3); + ICHECK_EQ(args.num_args, 3); IRModule mod = args[0]; this->Lower(mod, args[1], args[2]); }); } else if (name == "codegen") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.num_args, 0); + ICHECK_EQ(args.num_args, 0); this->Codegen(); }); } else if (name == "get_executable") { @@ -884,7 +884,7 @@ PackedFunc VMCompiler::GetFunction(const std::string& name, const ObjectPtrOptimizeModule(args[0], args[1], args[2]); }); } else { @@ -900,7 +900,7 @@ void VMCompiler::SetParam(const std::string& name, runtime::NDArray data_in) { void VMCompiler::Lower(IRModule mod, const TargetsMap& targets, const tvm::Target& target_host) { if (params_.size()) { BaseFunc base_func = mod->Lookup("main"); - CHECK(base_func->IsInstance()) + ICHECK(base_func->IsInstance()) << "VM compiler expects to compile relay::Function"; auto f = relay::backend::BindParamsByName(Downcast(base_func), params_); auto gvar = mod->GetGlobalVar("main"); @@ -936,7 +936,7 @@ void VMCompiler::Lower(IRModule mod, const TargetsMap& targets, const tvm::Targe auto vm_func = func_compiler.Compile(gvar, func); size_t func_index = context_.global_map.at(gvar); - CHECK(func_index < exec_->functions.size()); + ICHECK(func_index < exec_->functions.size()); exec_->functions[func_index] = vm_func; } } @@ -1123,7 +1123,7 @@ void VMCompiler::Codegen() { if (target_str == "ext_dev") { // Collect metadata in functions that are handled by external codegen. 
-    CHECK(mod->ContainGlobalVar(cfunc->func_name));
+    ICHECK(mod->ContainGlobalVar(cfunc->func_name));
     backend::ConstantUpdater const_visit(cfunc->func_name, &params_);
     const_visit(Downcast<Function>(mod->Lookup(cfunc->func_name)));
     continue;
diff --git a/src/relay/backend/vm/lambda_lift.cc b/src/relay/backend/vm/lambda_lift.cc
index 22b8364534c8..f21d0967701a 100644
--- a/src/relay/backend/vm/lambda_lift.cc
+++ b/src/relay/backend/vm/lambda_lift.cc
@@ -82,7 +82,7 @@ class LambdaLifter : public ExprMutator {
       auto var = GetRef<Var>(var_node);
       if (!letrec_.empty() && var == letrec_.back()) {
         auto it = lambda_map_.find(var);
-        CHECK(it != lambda_map_.end());
+        ICHECK(it != lambda_map_.end());
         return Call(it->second, call->args, call_node->attrs, call_node->type_args);
       }
     }
@@ -154,11 +154,12 @@ class LambdaLifter : public ExprMutator {
       lifted_func = MarkClosure(lifted_func);
     }
 
-    CHECK(lifted_func.defined());
+    ICHECK(lifted_func.defined());
 
     if (module_->ContainGlobalVar(name)) {
       const auto existing_func = module_->Lookup(name);
-      CHECK(tvm::StructuralEqual()(lifted_func, existing_func)) << "lifted function hash collision";
+      ICHECK(tvm::StructuralEqual()(lifted_func, existing_func))
+          << "lifted function hash collision";
       // If an identical function already exists, use its global var.
       global = module_->GetGlobalVar(name);
     } else {
diff --git a/src/relay/ir/dataflow_matcher.cc b/src/relay/ir/dataflow_matcher.cc
index 50c05f2923bc..536e65979ee4 100644
--- a/src/relay/ir/dataflow_matcher.cc
+++ b/src/relay/ir/dataflow_matcher.cc
@@ -85,7 +85,7 @@ void DFPatternMatcher::ClearMap(size_t watermark) {
 
 bool DFPatternMatcher::VisitDFPattern(const DFPattern& pattern, const Expr& expr) {
   if (memoize_ && memo_.count(pattern)) {
-    CHECK_EQ(memo_[pattern].size(), 1);
+    ICHECK_EQ(memo_[pattern].size(), 1);
     return expr.same_as(memo_[pattern][0]);
   } else {
     auto watermark = matched_nodes_.size();
@@ -133,7 +133,7 @@ bool MatchRetValue(const ObjectRef& lhs, const TVMRetValue& rhs) {
       }
       break;
     default:
-      CHECK(false) << "Unsupported type code in Pattern Node " << rhs.type_code();
+      ICHECK(false) << "Unsupported type code in Pattern Node " << rhs.type_code();
   }
   return false;
 }
@@ -644,7 +644,7 @@ class PatternGrouper {
     auto body = extractor.Mutate(expr);
 
     // Verify the pattern still holds
-    CHECK(DFPatternMatcher(body).Match(pattern_, body));
+    ICHECK(DFPatternMatcher(body).Match(pattern_, body));
     group.function = Function(params, body, NullValue<Type>(), Array<TypeVar>());
     group.name = extractor.GetName();
     // Check to make sure we aren't overlapping with another group or creating an invalid fusion
@@ -765,7 +765,7 @@ class PatternRewriter : protected MixedModeMutator {
     int count = 0;
     bool equal = true;
     static auto* structural_equal = runtime::Registry::Get("node.StructuralEqual");
-    CHECK(structural_equal) << "node.StructuralEqual is not registered.";
+    ICHECK(structural_equal) << "node.StructuralEqual is not registered.";
     do {
       last = post;
       for (auto callback : callbacks) {
diff --git a/src/relay/ir/expr.cc b/src/relay/ir/expr.cc
index 237cb35d8455..f2e0b363eb2b 100644
--- a/src/relay/ir/expr.cc
+++ b/src/relay/ir/expr.cc
@@ -47,7 +47,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     .set_dispatch<ConstantNode>([](const ObjectRef& ref, ReprPrinter* p) {
       auto* node = static_cast<const ConstantNode*>(ref.get());
       const PackedFunc* fprint = Registry::Get("relay._constant_repr");
-      CHECK(fprint) << "unable to find printing function for constants";
+      ICHECK(fprint) << "unable to find printing function for constants";
       std::string data = (*fprint)(GetRef<Constant>(node));
       p->stream << "Constant(" << data << ")";
     });
@@ -56,8 +56,8 @@ TensorType ConstantNode::tensor_type() const {
   auto dtype = DataType(data->dtype);
   Array<PrimExpr> shape;
   for (int i = 0; i < data->ndim; i++) {
-    CHECK_LE(data->shape[i], std::numeric_limits<int32_t>::max());
-    CHECK_GE(data->shape[i], std::numeric_limits<int32_t>::min());
+    ICHECK_LE(data->shape[i], std::numeric_limits<int32_t>::max());
+    ICHECK_GE(data->shape[i], std::numeric_limits<int32_t>::min());
     shape.push_back(tvm::IntImm(DataType::Int(32), data->shape[i]));
   }
diff --git a/src/relay/ir/expr_functor.cc b/src/relay/ir/expr_functor.cc
index a09179bcc585..a22b69c4ed1b 100644
--- a/src/relay/ir/expr_functor.cc
+++ b/src/relay/ir/expr_functor.cc
@@ -102,8 +102,8 @@ void ExpandDataflow(Expr expr, FCheckVisited fcheck_visited, FVisitLeaf fvisit_leaf
 }
 
 MixedModeVisitor::MixedModeVisitor(int visit_limit) {
-  CHECK(visit_limit > 0) << "Dataflow visit limit must be greater than 0";
-  CHECK(visit_limit < 10) << "Dataflow visit limit must be less than 10";
+  ICHECK(visit_limit > 0) << "Dataflow visit limit must be greater than 0";
+  ICHECK(visit_limit < 10) << "Dataflow visit limit must be less than 10";
   visit_limit_ = visit_limit;
 }
@@ -524,13 +524,13 @@ class ExprBinder : public MixedModeMutator, PatternMutator {
   using MixedModeMutator::VisitExpr_;
 
   Expr VisitExpr_(const LetNode* op) final {
-    CHECK(!args_map_.count(op->var)) << "Cannot bind an internel variable in let";
+    ICHECK(!args_map_.count(op->var)) << "Cannot bind an internel variable in let";
     return ExprMutator::VisitExpr_(op);
   }
 
   Expr VisitExpr_(const FunctionNode* op) final {
     for (Var param : op->params) {
-      CHECK(!args_map_.count(param)) << "Cannnot bind an internal function parameter";
+      ICHECK(!args_map_.count(param)) << "Cannnot bind an internal function parameter";
     }
     return ExprMutator::VisitExpr_(op);
   }
@@ -553,7 +553,7 @@ class ExprBinder : public MixedModeMutator, PatternMutator {
   }
 
   Var VisitVar(const Var& v) final {
-    CHECK(!args_map_.count(v)) << "Cannnot bind an internal pattern variable";
+    ICHECK(!args_map_.count(v)) << "Cannnot bind an internal pattern variable";
     return v;
   }
@@ -584,7 +584,7 @@ Expr Bind(const Expr& expr, const tvm::Map<Var, Expr>& args_map) {
       }
     }
     ret = Function(new_params, new_body, func->ret_type, func->type_params, func->attrs);
-    CHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
+    ICHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
     return std::move(ret);
   } else {
     return ExprBinder(args_map).VisitExpr(expr);
@@ -596,7 +596,7 @@ TVM_REGISTER_GLOBAL("relay.ir.Bind").set_body([](TVMArgs args, TVMRetValue* ret)
   if (input->IsInstance<ExprNode>()) {
     *ret = Bind(Downcast<Expr>(input), args[1]);
   } else {
-    CHECK(input->IsInstance<TypeNode>());
+    ICHECK(input->IsInstance<TypeNode>());
     *ret = Bind(Downcast<Type>(input), args[1]);
   }
 });
diff --git a/src/relay/ir/function.cc b/src/relay/ir/function.cc
index 1439e8b59cf0..c9920a621b56 100644
--- a/src/relay/ir/function.cc
+++ b/src/relay/ir/function.cc
@@ -29,8 +29,8 @@ namespace relay {
 Function::Function(tvm::Array<Var> params, Expr body, Type ret_type,
                    tvm::Array<TypeVar> type_params, DictAttrs attrs, Span span) {
   ObjectPtr<FunctionNode> n = make_object<FunctionNode>();
-  CHECK(params.defined());
-  CHECK(type_params.defined());
+  ICHECK(params.defined());
+  ICHECK(type_params.defined());
   n->params = std::move(params);
   n->body = std::move(body);
   n->ret_type = std::move(ret_type);
diff --git a/src/relay/ir/indexed_graph.h b/src/relay/ir/indexed_graph.h
index 70508279af21..4bbb741b760d 100644
--- a/src/relay/ir/indexed_graph.h
+++ b/src/relay/ir/indexed_graph.h
@@ -115,8 +115,8 @@ class IndexedGraph {
         return nullptr;
       }
       while (lhs != rhs) {
-        CHECK(lhs);
-        CHECK(rhs);
+        ICHECK(lhs);
+        ICHECK(rhs);
         if (lhs->depth_ < rhs->depth_) {
           rhs = rhs->dominator_parent_;
         } else if (lhs->depth_ > rhs->depth_) {
diff --git a/src/relay/ir/transform.cc b/src/relay/ir/transform.cc
index b5f4d152ee00..596f812e25af 100644
--- a/src/relay/ir/transform.cc
+++ b/src/relay/ir/transform.cc
@@ -128,7 +128,7 @@ IRModule FunctionPassNode::operator()(IRModule mod, const PassContext& pass_ctx)
   const PassInfo& pass_info = Info();
 
-  CHECK(mod.defined());
+  ICHECK(mod.defined());
 
   DLOG(INFO) << "Executing function pass : " << pass_info->name
              << " with opt level: " << pass_info->opt_level;
diff --git a/src/relay/op/algorithm/argsort.cc b/src/relay/op/algorithm/argsort.cc
index a24097420873..455d413c2746 100644
--- a/src/relay/op/algorithm/argsort.cc
+++ b/src/relay/op/algorithm/argsort.cc
@@ -33,10 +33,10 @@ bool ArgsortRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
   // `types` contains: [data, result]
   const ArgsortAttrs* param = attrs.as<ArgsortAttrs>();
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
         << "Argsort: expect input type to be TensorType but get " << types[0];
     return false;
   }
diff --git a/src/relay/op/algorithm/topk.cc b/src/relay/op/algorithm/topk.cc
index 14308dd592d6..b0e4b5dc6b4e 100644
--- a/src/relay/op/algorithm/topk.cc
+++ b/src/relay/op/algorithm/topk.cc
@@ -34,15 +34,15 @@ bool TopKRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
   // `types` contains: [data, result]
   const TopKAttrs* param = attrs.as<TopKAttrs>();
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
-  CHECK(data);
+  ICHECK(data);
   int ndim = data->shape.size();
   int axis = param->axis;
   if (axis < 0) {
     axis += ndim;
   }
-  CHECK(axis >= 0 && axis < ndim);
+  ICHECK(axis >= 0 && axis < ndim);
   Array<IndexExpr> out_shape;
   for (int i = 0; i < ndim; ++i) {
     if (i != axis) {
diff --git a/src/relay/op/dyn/algorithm/topk.cc b/src/relay/op/dyn/algorithm/topk.cc
index 1c88730a5463..0ce0a18b2170 100644
--- a/src/relay/op/dyn/algorithm/topk.cc
+++ b/src/relay/op/dyn/algorithm/topk.cc
@@ -33,31 +33,31 @@ bool TopKRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
   // `types` contains: [data, k, result]
   const TopKAttrs* param = attrs.as<TopKAttrs>();
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* k = types[1].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
        << "tile: expect input type to be TensorType but get " << types[0];
     return false;
   }
   if (k == nullptr) {
-    CHECK(types[1].as<IncompleteTypeNode>())
+    ICHECK(types[1].as<IncompleteTypeNode>())
        << "tile: expect input type to be TensorType but get " << types[1];
     return false;
   }
-  CHECK(k->shape.size() <= 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
+  ICHECK(k->shape.size() <= 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
   if (k->shape.size() == 1) {
     const IntImmNode* k_shape = k->shape[0].as<IntImmNode>();
-    CHECK(k_shape) << "Parameter k must have static shape";
-    CHECK_EQ(k_shape->value, 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
+    ICHECK(k_shape) << "Parameter k must have static shape";
+    ICHECK_EQ(k_shape->value, 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
   }
   int ndim = data->shape.size();
   int axis = param->axis;
   if (axis < 0) {
     axis += ndim;
   }
-  CHECK(axis >= 0 && axis < ndim);
+  ICHECK(axis >= 0 && axis < ndim);
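
// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch] The indexed_graph.h hunk above sits
// inside a textbook least-common-ancestor walk over the dominator tree: the
// deeper of the two nodes is repeatedly lifted to its immediate dominator
// until they meet. Self-contained sketch with a hypothetical Node type (the
// original ICHECKs the pointers instead of returning nullptr):
struct Node {
  size_t depth_ = 0;
  Node* dominator_parent_ = nullptr;
};

Node* LeastCommonAncestor(Node* lhs, Node* rhs) {
  if (lhs == nullptr || rhs == nullptr) return nullptr;
  while (lhs != rhs) {
    if (lhs == nullptr || rhs == nullptr) return nullptr;  // walked past a root
    if (lhs->depth_ < rhs->depth_) {
      rhs = rhs->dominator_parent_;  // rhs is deeper: lift it
    } else if (lhs->depth_ > rhs->depth_) {
      lhs = lhs->dominator_parent_;  // lhs is deeper: lift it
    } else {
      lhs = lhs->dominator_parent_;  // equal depth: lift both
      rhs = rhs->dominator_parent_;
    }
  }
  return lhs;
}
// ---------------------------------------------------------------------------
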
Array out_shape; for (int i = 0; i < ndim; ++i) { if (i != axis) { diff --git a/src/relay/op/dyn/image/resize.cc b/src/relay/op/dyn/image/resize.cc index 23e17400f29d..6581250db0cd 100644 --- a/src/relay/op/dyn/image/resize.cc +++ b/src/relay/op/dyn/image/resize.cc @@ -36,17 +36,17 @@ TVM_REGISTER_NODE_TYPE(ResizeAttrs); bool ResizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // {data, size, out} - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); const ResizeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "Resize only support input layouts that are convertible from NCHW." << " But got " << in_layout; diff --git a/src/relay/op/dyn/nn/pad.cc b/src/relay/op/dyn/nn/pad.cc index 73daccbd97fd..42ec784f8c15 100644 --- a/src/relay/op/dyn/nn/pad.cc +++ b/src/relay/op/dyn/nn/pad.cc @@ -41,7 +41,7 @@ namespace dyn { bool PadRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, pad_width_type, pad_value_type, ret_type] - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) return false; @@ -52,13 +52,13 @@ bool PadRel(const Array& types, int num_inputs, const Attrs& attrs, if (pad_value == nullptr) return false; int data_rank = data->shape.size(); - CHECK(data_rank) << "Data shape must have static rank"; + ICHECK(data_rank) << "Data shape must have static rank"; int pad_width_rank = pad_width->shape.size(); - CHECK_EQ(pad_width_rank, 2) << "Pad width must be 2D"; + ICHECK_EQ(pad_width_rank, 2) << "Pad width must be 2D"; const PadAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); std::vector oshape; for (int i = 0; i < data_rank; i++) { @@ -72,7 +72,7 @@ bool PadRel(const Array& types, int num_inputs, const Attrs& attrs, Array PadCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param); + ICHECK(param); auto data = inputs[0]; auto pad_width = inputs[1]; @@ -88,7 +88,7 @@ Array PadCompute(const Attrs& attrs, const Array& inputs } const auto* out_ttype = out_type.as(); - CHECK(out_ttype != nullptr); + ICHECK(out_ttype != nullptr); return Array{topi::pad(inputs[0], pad_before, pad_after, pad_value, "T_pad", topi::kElementWise, param->pad_mode, diff --git a/src/relay/op/dyn/nn/upsampling.cc b/src/relay/op/dyn/nn/upsampling.cc index 8a28475eacd5..93869757e96f 100644 --- a/src/relay/op/dyn/nn/upsampling.cc +++ b/src/relay/op/dyn/nn/upsampling.cc @@ -41,7 +41,7 @@ namespace dyn { bool UpSamplingRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, scale_h_type, scale_w_type, ret_type] - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); const auto* scale_h = types[1].as(); const auto* scale_w = types[2].as(); @@ -49,16 +49,16 @@ bool UpSamplingRel(const Array& types, int num_inputs, const Attrs& attrs, if (scale_h == nullptr) return false; if (scale_w == nullptr) return false; - CHECK_EQ(scale_h->shape.size(), 0); - CHECK_EQ(scale_w->shape.size(), 0); + ICHECK_EQ(scale_h->shape.size(), 0); + ICHECK_EQ(scale_w->shape.size(), 0); static 
const Layout kNCHW("NCHW"); const UpSamplingAttrs* param = attrs.as(); - CHECK(param); + ICHECK(param); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "UpSampling only supports input layouts that are convertible from NCHW." << " But got " << in_layout; @@ -122,18 +122,18 @@ RELAY_REGISTER_OP("dyn.nn.upsampling") bool UpSampling3DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, scale_d_type, scale_h_type, scale_w_type, ret_type] - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCDHW("NCDHW"); const UpSampling3DAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "UpSampling3D only support input layouts that are convertible from NCDHW." << " But got " << in_layout; diff --git a/src/relay/op/dyn/nn/upsampling.h b/src/relay/op/dyn/nn/upsampling.h index 79ed65bba36b..acdc54174913 100644 --- a/src/relay/op/dyn/nn/upsampling.h +++ b/src/relay/op/dyn/nn/upsampling.h @@ -43,7 +43,7 @@ Array > UpsamplingInferCorrectLayout(const Attrs& attrs, // NOTE: Discard "const" qualifier here. T* params = const_cast(attrs.as()); if (new_in_layouts.defined()) { - CHECK_GT(new_in_layouts.size(), 0); + ICHECK_GT(new_in_layouts.size(), 0); Layout raw_layout(params->layout); Layout input = new_in_layouts[0]; diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 863ad643f0da..119eba3da188 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -47,11 +47,11 @@ namespace dyn { bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, newshape, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "reshape: expect input type to be TensorType but get " << types[0]; return false; } @@ -59,7 +59,7 @@ bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, Array oshape; const auto* newshape = types[1].as(); if (newshape == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "reshape: expect input type to be TensorType but get " << types[1]; return false; } @@ -76,7 +76,7 @@ bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, Array ReshapeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* out_ttype = out_type.as(); - CHECK(out_ttype != nullptr); + ICHECK(out_ttype != nullptr); Array newshape; for (auto val : out_ttype->shape) { if (val->IsInstance()) { @@ -149,21 +149,21 @@ RELAY_REGISTER_OP("dyn.reshape") bool TileRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, reps, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* reps = types[1].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "tile: expect input type to be TensorType but get " << types[0]; return false; } if (reps == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) 
<< "tile: expect input type to be TensorType but get " << types[1]; return false; } const IntImmNode* reps_shape = reps->shape[0].as(); - CHECK(reps_shape) << "Parameter reps must have static shape"; + ICHECK(reps_shape) << "Parameter reps must have static shape"; const size_t ndim = data->shape.size(); const size_t rndim = reps_shape->value; size_t tndim = (ndim > rndim) ? ndim : rndim; @@ -178,7 +178,7 @@ bool TileRel(const Array& types, int num_inputs, const Attrs& attrs, Array TileCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { - CHECK_EQ(inputs.size(), 2); + ICHECK_EQ(inputs.size(), 2); const auto* out_ttype = out_type.as(); size_t rndim = inputs[1]->shape[0].as()->value; return {topi::dyn_tile(inputs[0], out_ttype->shape, rndim)}; @@ -212,7 +212,7 @@ RELAY_REGISTER_OP("dyn.tile") bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, broadcast_shape_type, ret_type] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* input_type = types[0].as(); const auto* target_type = types[1].as(); @@ -225,8 +225,9 @@ bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs auto out_dtype = input_type->dtype; // rank must be static const IntImmNode* rank = target_type->shape[0].as(); - CHECK(rank) << "Target shape must have static rank"; // rank must be static even in dyn pass - // could add support for dyn rank in futures + ICHECK(rank) + << "Target shape must have static rank"; // rank must be static even in dyn pass + // could add support for dyn rank in futures std::vector oshape; for (int i = 0; i < rank->value; ++i) { @@ -266,13 +267,13 @@ RELAY_REGISTER_OP("dyn.broadcast_to") bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [zeros_shape, ret_type] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const InitOpAttrs* param = attrs.as(); const auto* fill_shape = types[0].as(); DataType out_dtype = param->dtype; const IntImmNode* shape_shape = fill_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static rank"; + ICHECK(shape_shape) << "Parameter shape must have static rank"; std::vector oshape; for (int i = 0; i < shape_shape->value; ++i) { @@ -324,9 +325,9 @@ RELAY_REGISTER_OP("dyn.ones") bool OneHotRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [indices, on_value, off_value, result] - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const auto* indices = types[0].as(); - CHECK(indices); + ICHECK(indices); const auto param = attrs.as(); @@ -349,7 +350,7 @@ bool OneHotRel(const Array& types, int num_inputs, const Attrs& attrs, Array OneHotCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const auto* out_ttype = out_type.as(); return Array{topi::one_hot(inputs[0], inputs[1](), inputs[2](), -1, param->axis, param->dtype, out_ttype->shape)}; @@ -393,7 +394,7 @@ RELAY_REGISTER_OP("dyn.one_hot") bool FullRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const InitOpAttrs* param = attrs.as(); const auto* fill_value = types[0].as(); const auto* fill_shape = types[1].as(); @@ -406,11 +407,11 @@ bool FullRel(const Array& types, int num_inputs, const Attrs& attrs, out_dtype = fill_value->dtype; 
} - CHECK_EQ(fill_value->shape.size(), 0) + ICHECK_EQ(fill_value->shape.size(), 0) << "Fill value should be a scalar but has dimension " << fill_value->shape.size() << "."; const IntImmNode* rank = fill_shape->shape[0].as(); - CHECK(rank) << "Parameter shape must have static rank"; + ICHECK(rank) << "Parameter shape must have static rank"; std::vector oshape; for (int i = 0; i < rank->value; ++i) { @@ -449,7 +450,7 @@ RELAY_REGISTER_OP("dyn.full") bool StridedSliceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // [data, begin, end, strides, out] - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const StridedSliceAttrs* param = attrs.as(); if (param == nullptr) { return false; @@ -501,9 +502,9 @@ Array StridedSliceCompute(const Attrs& attrs, const Arrayshape.size(); - CHECK(begin->shape[0].as()->value == data_rank && - end->shape[0].as()->value == data_rank && - strides->shape[0].as()->value == data_rank) + ICHECK(begin->shape[0].as()->value == data_rank && + end->shape[0].as()->value == data_rank && + strides->shape[0].as()->value == data_rank) << "begin, end, and strides are required to have the same length" << " if they are dynamic variables."; return Array{DynamicStridedSlice(data, begin, end, strides)}; diff --git a/src/relay/op/image/dilation2d.cc b/src/relay/op/image/dilation2d.cc index 462f11f56d0d..1f8c7ec732d9 100644 --- a/src/relay/op/image/dilation2d.cc +++ b/src/relay/op/image/dilation2d.cc @@ -62,7 +62,7 @@ Expr MakeDilation2D(Expr data, Expr weight, Array strides, Array bool Dilation2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -70,23 +70,23 @@ bool Dilation2DRel(const Array& types, int num_inputs, const Attrs& attrs, static const Layout kOIHW("IHW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Dilation2D only support input layouts that are convertible from NCHW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Dilation2D only support kernel layouts that are convertible from OIHW." << " But got " << kernel_layout; Layout out_layout(param->data_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Dilation2D only support output layouts that are convertible from NCHW." 
<< " But got " << out_layout; diff --git a/src/relay/op/image/grid_sample.cc b/src/relay/op/image/grid_sample.cc index bc6989155323..d5fa68aed82a 100644 --- a/src/relay/op/image/grid_sample.cc +++ b/src/relay/op/image/grid_sample.cc @@ -35,21 +35,21 @@ TVM_REGISTER_NODE_TYPE(AffineGridAttrs); bool AffineGridRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; auto batch_size = data->shape[0]; const AffineGridAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Array oshape; - CHECK(data->shape.size() == 3U && reporter->AssertEQ(data->shape[1], 2) && - reporter->AssertEQ(data->shape[2], 3)) + ICHECK(data->shape.size() == 3U && reporter->AssertEQ(data->shape[1], 2) && + reporter->AssertEQ(data->shape[2], 3)) << "data should be an" "affine matrix with shape [batch_size, 2, 3]"; - CHECK(param->target_shape.defined() && param->target_shape.size() == 2) + ICHECK(param->target_shape.defined() && param->target_shape.size() == 2) << "target_shape should be 2D"; oshape.push_back(batch_size); oshape.push_back(2); @@ -97,12 +97,12 @@ TVM_REGISTER_NODE_TYPE(GridSampleAttrs); bool GridSampleRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* grid = types[1].as(); if (!data || !grid) return false; const auto* param = attrs.as(); - CHECK(param); + ICHECK(param); static const Layout kNCHW("NCHW"); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); diff --git a/src/relay/op/image/resize.cc b/src/relay/op/image/resize.cc index 41b7afe6d00c..b8875e48ed0f 100644 --- a/src/relay/op/image/resize.cc +++ b/src/relay/op/image/resize.cc @@ -35,17 +35,17 @@ TVM_REGISTER_NODE_TYPE(ResizeAttrs); bool ResizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); const ResizeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "Resize only support input layouts that are convertible from NCHW." << " But got " << in_layout; @@ -104,17 +104,17 @@ TVM_REGISTER_NODE_TYPE(Resize3dAttrs); bool Resize3dRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCDHW("NCDHW"); const Resize3dAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "Resize3d only support input layouts that are convertible from NCDHW." 
<< " But got " << in_layout; @@ -175,14 +175,14 @@ TVM_REGISTER_NODE_TYPE(CropAndResizeAttrs); bool CropAndResizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); const auto* boxes = types[1].as(); const auto* box_indices = types[2].as(); if (data == nullptr || boxes == nullptr || box_indices == nullptr) return false; const CropAndResizeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto crop_size = param->crop_size; DataType out_dtype = param->out_dtype; diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index b853ef635b12..dc5a1ebd3c73 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -54,19 +54,19 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage") bool AllocStorageRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3u); + ICHECK_EQ(types.size(), 3u); auto size_type = types[0]; auto tensor_type = size_type.as(); - CHECK(tensor_type != nullptr); - CHECK_EQ(tensor_type->dtype, DataType::Int(64)); - CHECK_EQ(tensor_type->shape.size(), 0); + ICHECK(tensor_type != nullptr); + ICHECK_EQ(tensor_type->dtype, DataType::Int(64)); + ICHECK_EQ(tensor_type->shape.size(), 0); auto align_type = types[1]; auto align_ttype = align_type.as(); - CHECK(align_ttype != nullptr); - CHECK_EQ(align_ttype->dtype, DataType::Int(64)); - CHECK_EQ(align_ttype->shape.size(), 0); + ICHECK(align_ttype != nullptr); + ICHECK_EQ(align_ttype->dtype, DataType::Int(64)); + ICHECK_EQ(align_ttype->shape.size(), 0); auto mod = reporter->GetModule(); - CHECK(mod.defined()); + ICHECK(mod.defined()); auto storage_name = mod->GetGlobalTypeVar("Storage"); auto storage = TypeCall(storage_name, {}); reporter->Assign(types[2], storage); @@ -107,10 +107,10 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor") std::vector FromConstShape(Constant konst) { runtime::NDArray shape = konst->data; std::vector raw_shape; - CHECK_EQ(shape->ndim, 1u); - CHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got " - << runtime::DLDataType2String(shape->dtype); - CHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32) + ICHECK_EQ(shape->ndim, 1u); + ICHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got " + << runtime::DLDataType2String(shape->dtype); + ICHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32) << "The dtype of constant shape must be int32 or int64, but got" << runtime::DLDataType2String(shape->dtype); @@ -131,28 +131,28 @@ std::vector FromConstShape(Constant konst) { bool AllocTensorRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4u); + ICHECK_EQ(types.size(), 4u); auto alloc_attrs = attrs.as(); - CHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes"; + ICHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes"; // First argument should be storage. auto mod = reporter->GetModule(); - CHECK(mod.defined()); + ICHECK(mod.defined()); auto storage_name = mod->GetGlobalTypeVar("Storage"); auto storage = relay::TypeCall(storage_name, {}); reporter->Assign(types[0], storage); // Second argument should be the offset. 
auto offset_type = types[1].as(); - CHECK(offset_type != nullptr) << "must be a scalar type"; + ICHECK(offset_type != nullptr) << "must be a scalar type"; // Third argument should be shape tensor. auto tt = types[2].as(); - CHECK(tt != nullptr) << "must be tensor type"; + ICHECK(tt != nullptr) << "must be tensor type"; // Be careful about having to allocate scalars. int64_t dims = 0; if (tt->shape.size() != 0) { auto rank = tt->shape[0].as(); - CHECK(rank != nullptr); + ICHECK(rank != nullptr); dims = rank->value; } @@ -161,14 +161,14 @@ bool AllocTensorRel(const Array& types, int num_inputs, const Attrs& attrs if (alloc_attrs->const_shape.defined()) { auto con = alloc_attrs->const_shape; auto sh = FromConstShape(con); - CHECK_EQ(sh.size(), dims); + ICHECK_EQ(sh.size(), dims); Array out_shape; for (auto i = 0u; i < dims; i++) { out_shape.push_back(tvm::Integer(sh[i])); } alloc_type = TensorType(out_shape, alloc_attrs->dtype); } else { - CHECK(alloc_attrs->assert_shape.defined()) + ICHECK(alloc_attrs->assert_shape.defined()) << "the assert_shape must be set when const_shape is not"; alloc_type = TensorType(alloc_attrs->assert_shape, alloc_attrs->dtype); return true; @@ -198,7 +198,7 @@ RELAY_REGISTER_OP("memory.alloc_tensor") bool KillRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2u); + ICHECK_EQ(types.size(), 2u); // TODO(@jroesch): should only support tensors. reporter->Assign(types[1], TupleType::Empty()); return true; diff --git a/src/relay/op/nn/bitserial.cc b/src/relay/op/nn/bitserial.cc index 61a1b8fdf289..853807997a4d 100644 --- a/src/relay/op/nn/bitserial.cc +++ b/src/relay/op/nn/bitserial.cc @@ -50,9 +50,9 @@ Array> BinaryConv2DInferCorrectLayout(const Attrs& attrs, bool BitPackRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { const BitPackAttrs* param = attrs.as(); - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); - CHECK(data); + ICHECK(data); int ndim = data->shape.size(); int bits = param->bits; int pack_axis = param->pack_axis; @@ -120,20 +120,20 @@ TVM_REGISTER_NODE_TYPE(BinaryConv2DAttrs); bool BinaryConv2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; const BinaryConv2DAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); static const Layout kNCHW("NCHW"); const Layout in_layout(param->data_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW); Array dshape_nchw = trans_in_layout.ForwardShape(data->shape); - CHECK(param->channels.defined()); - CHECK(param->kernel_size.defined()); + ICHECK(param->channels.defined()); + ICHECK(param->kernel_size.defined()); Array oshape({dshape_nchw[0], param->channels, 0, 0}); IndexExpr pad_h, pad_w; GetPaddingHeightWidth(param->padding, &pad_h, &pad_w); @@ -199,15 +199,15 @@ TVM_REGISTER_NODE_TYPE(BinaryDenseAttrs); bool BinaryDenseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; const BinaryDenseAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK(static_cast(data->shape.size()) != 0); - CHECK(param->units.defined()); + ICHECK(static_cast(data->shape.size()) != 
0); + ICHECK(param->units.defined()); Array oshape = data->shape; oshape.Set((oshape.size() - 1), param->units); diff --git a/src/relay/op/nn/convolution.h b/src/relay/op/nn/convolution.h index 935058c1a5b3..f0112227153d 100644 --- a/src/relay/op/nn/convolution.h +++ b/src/relay/op/nn/convolution.h @@ -40,7 +40,7 @@ namespace relay { template bool Conv1DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -48,23 +48,23 @@ bool Conv1DRel(const Array& types, int num_inputs, const Attrs& attrs, static const Layout kOIW("OIW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCW." << " But got " << out_layout; @@ -92,17 +92,17 @@ bool Conv1DRel(const Array& types, int num_inputs, const Attrs& attrs, auto wshape = trans_kernel_layout.ForwardShape(weight->shape); if (param->kernel_size.defined()) { // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2])) << "Conv1D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << wshape; } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[0])) + ICHECK(reporter->AssertEQ(param->channels, wshape[0])) << "Conv1D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << wshape; } if (!dshape_ncw[1].as() && !wshape[1].as()) { - CHECK(reporter->AssertEQ(dshape_ncw[1], wshape[1])); + ICHECK(reporter->AssertEQ(dshape_ncw[1], wshape[1])); } channels = wshape[0]; dilated_ksize = 1 + (wshape[2] - 1) * param->dilation[0]; @@ -139,7 +139,7 @@ bool Conv2DRel(const Array& types, int num_inputs, const Attrs& attrs, static const Layout kOIHW("OIHW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); @@ -191,8 +191,8 @@ bool Conv2DRel(const Array& types, int num_inputs, const Attrs& attrs, IndexExpr channels, dilated_ksize_y, dilated_ksize_x; // infer weight if the kernel_size and channels are defined if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 2); - CHECK_EQ(param->dilation.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->dilation.size(), 2); Array wshape; if (is_depthwise) { @@ -291,7 +291,7 @@ bool Conv2DRel(const Array& types, int 
num_inputs, const Attrs& attrs, template bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -299,23 +299,23 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, static const Layout kOIDHW("OIDHW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCDHW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIDHW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCDHW." << " But got " << out_layout; @@ -324,8 +324,8 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, IndexExpr channels, dilated_ksize_z, dilated_ksize_y, dilated_ksize_x; // infer weight if the kernel_size and channels are defined if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 3); - CHECK_EQ(param->dilation.size(), 3); + ICHECK_EQ(param->kernel_size.size(), 3); + ICHECK_EQ(param->dilation.size(), 3); Array wshape; tvm::tir::ExprDeepEqual expr_equal; @@ -355,23 +355,23 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, if (weight == nullptr) return false; auto wshape = trans_kernel_layout.ForwardShape(weight->shape); if (param->kernel_size.defined()) { - CHECK_EQ(param->kernel_size.size(), 3); + ICHECK_EQ(param->kernel_size.size(), 3); // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && - reporter->AssertEQ(param->kernel_size[1], wshape[3]) && - reporter->AssertEQ(param->kernel_size[2], wshape[4])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && + reporter->AssertEQ(param->kernel_size[1], wshape[3]) && + reporter->AssertEQ(param->kernel_size[2], wshape[4])) << "Conv3D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << wshape; } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[0])) + ICHECK(reporter->AssertEQ(param->channels, wshape[0])) << "Conv3D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << wshape; } if (!dshape_ncdhw[1].as() && !wshape[1].as()) { - CHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[1])); + ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[1])); } channels = wshape[0]; dilated_ksize_z = 1 + (wshape[2] - 1) * param->dilation[0]; @@ -413,14 +413,14 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, // Winograd convolution shape relations inline bool Conv2DWinogradWeightTransformRel(const Array& types, int 
num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const ConvWinogradWeightTransformAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout"; + ICHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout"; std::vector oshape{ param->tile_size + data->shape[2] - 1, @@ -458,16 +458,16 @@ inline bool Conv2DWinogradWeightTransformRel(const Array& types, int num_i // inline bool Conv2DGemmWeightTransformRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* weight = types[0].as(); if (weight == nullptr) return false; const ConvGemmWeightTransformAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); int n = param->tile_rows; int k = param->tile_cols; - CHECK_EQ(weight->shape.size(), 4) << "Only support HWIO kernel layout"; + ICHECK_EQ(weight->shape.size(), 4) << "Only support HWIO kernel layout"; const auto K = weight->shape[0] * weight->shape[1] * weight->shape[2]; const auto N = weight->shape[3]; @@ -494,14 +494,14 @@ inline bool Conv2DGemmWeightTransformRel(const Array& types, int num_input inline bool Conv3DWinogradWeightTransformRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const ConvWinogradWeightTransformAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK_EQ(data->shape.size(), 5) << "Only support NCDHW normal kernel layout"; + ICHECK_EQ(data->shape.size(), 5) << "Only support NCDHW normal kernel layout"; // Shape of packed weights depends on whether depth is being transformed or not. 
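
// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch] The Conv*Rel relations in this file
// share one piece of arithmetic: the effective kernel grows with dilation
// (the `1 + (wshape[2] - 1) * dilation` lines above), and the spatial output
// size follows the standard convolution formula. Plain integers standing in
// for IndexExpr:
#include <cstdint>

inline int64_t DilatedKsize(int64_t ksize, int64_t dilation) {
  return 1 + (ksize - 1) * dilation;  // e.g. k=3, dilation=2 -> 5
}

inline int64_t ConvOutDim(int64_t in, int64_t pad_total, int64_t ksize,
                          int64_t dilation, int64_t stride) {
  // floor division, matching indexdiv on static shapes
  return (in + pad_total - DilatedKsize(ksize, dilation)) / stride + 1;
}
// e.g. ConvOutDim(224, /*pad_total=*/2, /*k=*/3, /*dilation=*/1, /*stride=*/1) == 224
// ---------------------------------------------------------------------------
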
Array oshape({0, 0, 0, data->shape[0], data->shape[1]}); @@ -524,7 +524,7 @@ inline bool Conv3DWinogradWeightTransformRel(const Array& types, int num_i inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { return false; @@ -532,9 +532,9 @@ inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array& types, int const Conv2DWinogradNNPACKWeightTransformAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout"; + ICHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout"; std::vector oshape{ data->shape[0], @@ -554,30 +554,30 @@ inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array& types, int template bool Conv2DWinogradRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); static const Layout kOIHW("OIHW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCHW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIHW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCHW." 
<< " But got " << out_layout; @@ -585,11 +585,11 @@ bool Conv2DWinogradRel(const Array& types, int num_inputs, const Attrs& at IndexExpr channels, dilated_ksize_y, dilated_ksize_x; - CHECK(param->kernel_size.defined() && param->channels.defined()) + ICHECK(param->kernel_size.defined() && param->channels.defined()) << "The kernel size and channels of a Conv must be set or inferred by previous pass"; - CHECK_EQ(param->kernel_size.size(), 2); - CHECK_EQ(param->dilation.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->dilation.size(), 2); channels = param->channels; dilated_ksize_y = 1 + (param->kernel_size[0] - 1) * param->dilation[0]; @@ -631,30 +631,30 @@ bool Conv2DWinogradRel(const Array& types, int num_inputs, const Attrs& at template bool Conv2DGemmRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNHWC("NHWC"); static const Layout kHWIO("HWIO"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNHWC); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NHWC." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kHWIO); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from HWIO." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNHWC); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NHWC." << " But got " << out_layout; @@ -662,11 +662,11 @@ bool Conv2DGemmRel(const Array& types, int num_inputs, const Attrs& attrs, IndexExpr channels, dilated_ksize_y, dilated_ksize_x; - CHECK(param->kernel_size.defined() && param->channels.defined()) + ICHECK(param->kernel_size.defined() && param->channels.defined()) << "The kernel size and channels of a Conv must be set or inferred by previous pass"; - CHECK_EQ(param->kernel_size.size(), 2); - CHECK_EQ(param->dilation.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->dilation.size(), 2); channels = param->channels; dilated_ksize_y = 1 + (param->kernel_size[0] - 1) * param->dilation[0]; @@ -703,30 +703,30 @@ bool Conv2DGemmRel(const Array& types, int num_inputs, const Attrs& attrs, template bool Conv3DWinogradRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCDHW("NCDHW"); static const Layout kOIDHW("OIDHW"); const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCDHW." 
<< " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIDHW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCDHW." << " But got " << out_layout; @@ -734,11 +734,11 @@ bool Conv3DWinogradRel(const Array& types, int num_inputs, const Attrs& at IndexExpr channels, dilated_ksize_d, dilated_ksize_y, dilated_ksize_x; - CHECK(param->kernel_size.defined() && param->channels.defined()) + ICHECK(param->kernel_size.defined() && param->channels.defined()) << "The kernel size and channels of a Conv must be set or inferred by previous pass"; - CHECK_EQ(param->kernel_size.size(), 3); - CHECK_EQ(param->dilation.size(), 3); + ICHECK_EQ(param->kernel_size.size(), 3); + ICHECK_EQ(param->dilation.size(), 3); channels = param->channels; dilated_ksize_d = 1 + (param->kernel_size[0] - 1) * param->dilation[0]; @@ -787,7 +787,7 @@ bool Conv3DWinogradRel(const Array& types, int num_inputs, const Attrs& at template bool Conv1DTransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -796,23 +796,23 @@ bool Conv1DTransposeRel(const Array& types, int num_inputs, const Attrs& a static const Layout kOIW("OIW"); const Conv1DTransposeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCW." 
<< " But got " << out_layout; @@ -822,8 +822,8 @@ bool Conv1DTransposeRel(const Array& types, int num_inputs, const Attrs& a // infer weight if the kernel_size and channels are defined if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 1); - CHECK_EQ(param->dilation.size(), 1); + ICHECK_EQ(param->kernel_size.size(), 1); + ICHECK_EQ(param->dilation.size(), 1); Array wshape( {dshape_ncw[1], indexdiv(param->channels, param->groups), param->kernel_size[0]}); @@ -839,19 +839,19 @@ bool Conv1DTransposeRel(const Array& types, int num_inputs, const Attrs& a if (weight == nullptr) return false; auto wshape = trans_kernel_layout.ForwardShape(weight->shape); if (param->kernel_size.defined()) { - CHECK_EQ(param->kernel_size.size(), 1); + ICHECK_EQ(param->kernel_size.size(), 1); // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2])) << "Conv1D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << Array(wshape); } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[1])) + ICHECK(reporter->AssertEQ(param->channels, wshape[1])) << "Conv1D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << Array(wshape); } if (!dshape_ncw[1].as() && !wshape[0].as()) { - CHECK(reporter->AssertEQ(indexdiv(dshape_ncw[1], param->groups), wshape[0])); + ICHECK(reporter->AssertEQ(indexdiv(dshape_ncw[1], param->groups), wshape[0])); } channels = wshape[1]; dilated_ksize_x = 1 + (wshape[2] - 1) * param->dilation[0]; @@ -879,7 +879,7 @@ bool Conv1DTransposeRel(const Array& types, int num_inputs, const Attrs& a template bool Conv3DTransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -888,23 +888,23 @@ bool Conv3DTransposeRel(const Array& types, int num_inputs, const Attrs& a static const Layout kOIDHW("OIDHW"); const Conv3DTransposeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv3d_transpose only support input layouts that are convertible from NCDHW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv3d_transpose only support kernel layouts that are convertible from OIDHW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv3d_transpose only support output layouts that are convertible from NCDHW." 
<< " But got " << out_layout; @@ -914,8 +914,8 @@ bool Conv3DTransposeRel(const Array& types, int num_inputs, const Attrs& a // infer weight if the kernel_size and channels are defined if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 3); - CHECK_EQ(param->dilation.size(), 3); + ICHECK_EQ(param->kernel_size.size(), 3); + ICHECK_EQ(param->dilation.size(), 3); Array wshape({dshape_ncdhw[1], indexdiv(param->channels, param->groups), param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}); @@ -933,21 +933,21 @@ bool Conv3DTransposeRel(const Array& types, int num_inputs, const Attrs& a if (weight == nullptr) return false; auto wshape = trans_kernel_layout.ForwardShape(weight->shape); if (param->kernel_size.defined()) { - CHECK_EQ(param->kernel_size.size(), 3); + ICHECK_EQ(param->kernel_size.size(), 3); // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && - reporter->AssertEQ(param->kernel_size[1], wshape[3]) && - reporter->AssertEQ(param->kernel_size[2], wshape[4])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && + reporter->AssertEQ(param->kernel_size[1], wshape[3]) && + reporter->AssertEQ(param->kernel_size[2], wshape[4])) << "Conv3D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << Array(wshape); } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[1])) + ICHECK(reporter->AssertEQ(param->channels, wshape[1])) << "Conv3D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << Array(wshape); } if (!dshape_ncdhw[1].as() && !wshape[0].as()) { - CHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0])); + ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0])); } channels = wshape[1]; dilated_ksize_d = 1 + (wshape[2] - 1) * param->dilation[0]; @@ -991,7 +991,7 @@ bool Conv3DTransposeRel(const Array& types, int num_inputs, const Attrs& a template bool Conv2DTransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; @@ -1000,23 +1000,23 @@ bool Conv2DTransposeRel(const Array& types, int num_inputs, const Attrs& a static const Layout kOIHW("OIHW"); const Conv2DTransposeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->data_layout); const Layout kernel_layout(param->kernel_layout); const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(trans_in_layout.defined()) + ICHECK(trans_in_layout.defined()) << "Conv only support input layouts that are convertible from NCHW." << " But got " << in_layout; const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW); - CHECK(trans_kernel_layout.defined()) + ICHECK(trans_kernel_layout.defined()) << "Conv only support kernel layouts that are convertible from OIHW." << " But got " << kernel_layout; Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW); - CHECK(trans_out_layout.defined()) + ICHECK(trans_out_layout.defined()) << "Conv only support output layouts that are convertible from NCHW." 
<< " But got " << out_layout; @@ -1026,8 +1026,8 @@ bool Conv2DTransposeRel(const Array& types, int num_inputs, const Attrs& a // infer weight if the kernel_size and channels are defined if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 2); - CHECK_EQ(param->dilation.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->dilation.size(), 2); Array wshape({dshape_nchw[1], indexdiv(param->channels, param->groups), param->kernel_size[0], param->kernel_size[1]}); @@ -1044,20 +1044,20 @@ bool Conv2DTransposeRel(const Array& types, int num_inputs, const Attrs& a if (weight == nullptr) return false; auto wshape = trans_kernel_layout.ForwardShape(weight->shape); if (param->kernel_size.defined()) { - CHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && - reporter->AssertEQ(param->kernel_size[1], wshape[3])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && + reporter->AssertEQ(param->kernel_size[1], wshape[3])) << "Conv2D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << Array(wshape); } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[1])) + ICHECK(reporter->AssertEQ(param->channels, wshape[1])) << "Conv2D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << Array(wshape); } if (!dshape_nchw[1].as() && !wshape[0].as()) { - CHECK(reporter->AssertEQ(indexdiv(dshape_nchw[1], param->groups), wshape[0])); + ICHECK(reporter->AssertEQ(indexdiv(dshape_nchw[1], param->groups), wshape[0])); } channels = wshape[1]; dilated_ksize_y = 1 + (wshape[2] - 1) * param->dilation[0]; @@ -1093,21 +1093,21 @@ bool Conv2DTransposeRel(const Array& types, int num_inputs, const Attrs& a template bool DeformableConv2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); const auto* weight = types[2].as(); - CHECK(data); + ICHECK(data); auto* param = attrs.as(); - CHECK_EQ(param->data_layout, "NCHW") << "data layout not supported."; - CHECK_EQ(param->kernel_layout, "OIHW") << "kernel_layout not supported."; + ICHECK_EQ(param->data_layout, "NCHW") << "data layout not supported."; + ICHECK_EQ(param->kernel_layout, "OIHW") << "kernel_layout not supported."; IndexExpr channels, dilated_ksize_y, dilated_ksize_x, ksize_y, ksize_x; // infer weight shape if kernel_size and channels are defiend if (param->kernel_size.defined() && param->channels.defined()) { - CHECK_EQ(param->kernel_size.size(), 2); - CHECK_EQ(param->dilation.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->dilation.size(), 2); Array wshape({param->channels, indexdiv(data->shape[1], param->groups), param->kernel_size[0], param->kernel_size[1]}); channels = param->channels; @@ -1122,20 +1122,20 @@ bool DeformableConv2DRel(const Array& types, int num_inputs, const Attrs& if (weight == nullptr) return false; auto wshape = weight->shape; if (param->kernel_size.defined()) { - CHECK_EQ(param->kernel_size.size(), 2); + ICHECK_EQ(param->kernel_size.size(), 2); // check the size - CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && - reporter->AssertEQ(param->kernel_size[1], wshape[3])) + ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) && + 
reporter->AssertEQ(param->kernel_size[1], wshape[3])) << "DeformableConv2D: shape of weight is inconsistent with kernel_size, " << " kernel_size=" << param->kernel_size << " wshape=" << wshape; } if (param->channels.defined()) { - CHECK(reporter->AssertEQ(param->channels, wshape[0])) + ICHECK(reporter->AssertEQ(param->channels, wshape[0])) << "DeformableConv2D: shape of weight is inconsistent with channels, " << " channels=" << param->channels << " wshape=" << wshape; } if (!data->shape[1].as() && !wshape[1].as()) { - CHECK(reporter->AssertEQ(indexdiv(data->shape[1], param->groups), wshape[1])); + ICHECK(reporter->AssertEQ(indexdiv(data->shape[1], param->groups), wshape[1])); } channels = wshape[0]; ksize_y = wshape[2]; diff --git a/src/relay/op/nn/correlation.cc b/src/relay/op/nn/correlation.cc index 5970cc75b2a9..0c2f481e10cb 100644 --- a/src/relay/op/nn/correlation.cc +++ b/src/relay/op/nn/correlation.cc @@ -64,14 +64,14 @@ Expr MakeCorrelation(Expr data1, Expr data2, int kernel_size, int max_displaceme bool CorrelationRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data1 = types[0].as(); const auto* data2 = types[1].as(); if (data1 == nullptr || data2 == nullptr) return false; const CorrelationAttrs* param = attrs.as(); - CHECK(param != nullptr); - CHECK_EQ(param->layout, "NCHW") << "layout not supported."; + ICHECK(param != nullptr); + ICHECK_EQ(param->layout, "NCHW") << "layout not supported."; IndexExpr pad_h, pad_w; GetPaddingHeightWidth(param->padding, &pad_h, &pad_w); IndexExpr padded_height = data1->shape[2] + pad_h; diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc index 58dfab27a933..ea25c1a9c0f9 100644 --- a/src/relay/op/nn/nn.cc +++ b/src/relay/op/nn/nn.cc @@ -50,17 +50,17 @@ TVM_REGISTER_NODE_TYPE(BiasAddAttrs); bool BiasAddRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; const BiasAddAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); int axis = param->axis; if (axis < 0) { axis = data->shape.size() + axis; } - CHECK_LE(axis, static_cast(data->shape.size())) + ICHECK_LE(axis, static_cast(data->shape.size())) << "axis " << param->axis << " is out of range"; // assign output type @@ -107,15 +107,15 @@ Expr MakeFIFOBuffer(Expr input, Expr buffer, int axis) { bool FIFOBufferRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* input = types[0].as(); const auto* buffer = types[1].as(); const FIFOBufferAttrs* param = attrs.as(); if (input == nullptr || buffer == nullptr) { return false; } - CHECK(param != nullptr); - CHECK_EQ(input->shape.size(), buffer->shape.size()); + ICHECK(param != nullptr); + ICHECK_EQ(input->shape.size(), buffer->shape.size()); const size_t buffer_axis = static_cast( param->axis < 0 ? 
static_cast(buffer->shape.size()) + param->axis : param->axis); @@ -221,14 +221,14 @@ TVM_REGISTER_NODE_TYPE(PReluAttrs); bool PReluRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; const PReluAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK(param->axis < static_cast(data->shape.size())) + ICHECK(param->axis < static_cast(data->shape.size())) << "Wrong axis (" << param->axis << ")value."; // assign alpha type @@ -245,11 +245,11 @@ Array> PReluInferCorrectLayout(const Attrs& attrs, const Array& new_in_layouts, const Array& old_in_layouts, const Array& old_in_types) { - CHECK_EQ(old_in_layouts.size(), 2U); - CHECK_EQ(old_in_types.size(), 2U); + ICHECK_EQ(old_in_layouts.size(), 2U); + ICHECK_EQ(old_in_types.size(), 2U); Layout data_layout = old_in_layouts[0]; if (new_in_layouts.defined()) { - CHECK_EQ(new_in_layouts.size(), 2U); + ICHECK_EQ(new_in_layouts.size(), 2U); } return Array>{{data_layout, Layout("C")}, {data_layout}}; } @@ -335,8 +335,8 @@ RELAY_REGISTER_OP("nn.log_softmax") .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); - CHECK(param->axis == -1 || param->axis == static_cast(inputs[0].ndim()) - 1) + ICHECK(param != nullptr); + ICHECK(param->axis == -1 || param->axis == static_cast(inputs[0].ndim()) - 1) << "log_softmax currently only works on last dimension"; return Array{topi::nn::log_softmax(inputs[0])}; }); @@ -344,7 +344,7 @@ RELAY_REGISTER_OP("nn.log_softmax") // relay.nn.batch_flatten bool BatchFlattenRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; if (data->shape.size() == 0) return false; @@ -499,7 +499,7 @@ TVM_REGISTER_NODE_TYPE(DropoutAttrs); bool DropoutRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; @@ -544,7 +544,7 @@ Array> BatchNormInferCorrectLayout(const Attrs& attrs, Array> old_in_shapes; for (auto old_in_t : old_in_types) { - CHECK(old_in_t.as()); + ICHECK(old_in_t.as()); old_in_shapes.push_back(old_in_t.as()->shape); } @@ -572,14 +572,14 @@ Array> BatchNormInferCorrectLayout(const Attrs& attrs, bool BatchNormRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 6); + ICHECK_EQ(types.size(), 6); const auto* data = types[0].as(); if (data == nullptr) return false; const BatchNormAttrs* param = attrs.as(); // axis of -1 means use the last dimension - CHECK(param->axis >= -1 && param->axis < (int)data->shape.size()); + ICHECK(param->axis >= -1 && param->axis < (int)data->shape.size()); int axis = (param->axis != -1) ? 
param->axis : data->shape.size() - 1; auto axis_size = data->shape[axis]; @@ -666,12 +666,12 @@ TVM_REGISTER_NODE_TYPE(InstanceNormAttrs); bool InstanceNormRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) return false; const InstanceNormAttrs* param = attrs.as(); int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size(); - CHECK(axis >= 0 && axis < (int)data->shape.size()); + ICHECK(axis >= 0 && axis < (int)data->shape.size()); reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[3], TensorType(data->shape, data->dtype)); @@ -733,12 +733,12 @@ TVM_REGISTER_NODE_TYPE(LayerNormAttrs); bool LayerNormRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) return false; const LayerNormAttrs* param = attrs.as(); int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size(); - CHECK(axis >= 0 && axis < (int)data->shape.size()); + ICHECK(axis >= 0 && axis < (int)data->shape.size()); reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[3], TensorType(data->shape, data->dtype)); @@ -778,12 +778,12 @@ TVM_REGISTER_NODE_TYPE(GroupNormAttrs); bool GroupNormRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) return false; const GroupNormAttrs* param = attrs.as(); int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size(); - CHECK(axis >= 0 && axis < (int)data->shape.size()); + ICHECK(axis >= 0 && axis < (int)data->shape.size()); reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype)); reporter->Assign(types[3], TensorType(data->shape, data->dtype)); @@ -847,11 +847,11 @@ If the input has size k on axis 1, then both gamma and beta have shape (k,). 
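// A minimal sketch (hypothetical helper, not part of this patch) of the shape
// rule that the BatchMatmulRel hunk below enforces, assuming x has shape
// (B, M, K) and y has shape (B', N, K), where either batch dim may be 1:
//
//   inline bool BatchMatmulShapesOk(int64_t bx, int64_t by, int64_t kx, int64_t ky) {
//     // Batch dims must match or broadcast from 1; reduction dims must match.
//     return (bx == by || bx == 1 || by == 1) && kx == ky;
//   }
//
// For example, x = (4, 16, 32) with y = (1, 8, 32) passes and gives an output
// of shape (4, 16, 8).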
// relay.nn.batch_matmul bool BatchMatmulRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* x = types[0].as(); const auto* y = types[1].as(); if (x == nullptr || y == nullptr) return false; - CHECK(x->shape.size() == 3 && y->shape.size() == 3); + ICHECK(x->shape.size() == 3 && y->shape.size() == 3); bool is_dyn = false; Array oshape; for (size_t i = 0; i < 3; ++i) { @@ -867,11 +867,11 @@ bool BatchMatmulRel(const Array& types, int num_inputs, const Attrs& attrs } } if (!is_dyn) { - CHECK(reporter->AssertEQ(x->shape[0], y->shape[0]) || reporter->AssertEQ(x->shape[0], 1) || - reporter->AssertEQ(y->shape[0], 1)) + ICHECK(reporter->AssertEQ(x->shape[0], y->shape[0]) || reporter->AssertEQ(x->shape[0], 1) || + reporter->AssertEQ(y->shape[0], 1)) << "BatchDot: batch dimensions don't match, " << " x shape=" << x->shape << ", y shape=" << y->shape; - CHECK(reporter->AssertEQ(x->shape[2], y->shape[2])) + ICHECK(reporter->AssertEQ(x->shape[2], y->shape[2])) << "BatchDot: shapes of x and y is inconsistent, " << " x shape=" << x->shape << ", y shape=" << y->shape; @@ -913,19 +913,19 @@ are data in batch. // relay.nn.cross_entropy bool CrossEntropyRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* x = types[0].as(); const auto* y = types[1].as(); if (x == nullptr || y == nullptr) return false; - CHECK(x->shape.size() == 2 && y->shape.size() == 2) + ICHECK(x->shape.size() == 2 && y->shape.size() == 2) << "CrossEntropy: shapes of x and y is inconsistent, " << "x shape = " << x->shape << ", " << "y shape = " << y->shape; - CHECK(reporter->AssertEQ(x->shape[0], y->shape[0])) + ICHECK(reporter->AssertEQ(x->shape[0], y->shape[0])) << "CrossEntropy: shapes of x and y is inconsistent, " << "x shape = " << x->shape << ", " << "y shape = " << y->shape; - CHECK(reporter->AssertEQ(x->shape[1], y->shape[1])) + ICHECK(reporter->AssertEQ(x->shape[1], y->shape[1])) << "CrossEntropy: shapes of x and y is inconsistent, " << "x shape = " << x->shape << ", " << "y shape = " << y->shape; @@ -958,11 +958,11 @@ TVM_REGISTER_NODE_TYPE(DilateAttrs); bool DilateRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* x = types[0].as(); const DilateAttrs* param = attrs.as(); if (x == nullptr) return false; - CHECK_EQ(x->shape.size(), param->strides.size()); + ICHECK_EQ(x->shape.size(), param->strides.size()); std::vector oshape; for (size_t i = 0; i < param->strides.size(); ++i) { @@ -1022,18 +1022,18 @@ TVM_REGISTER_NODE_TYPE(SubPixelAttrs); bool DepthToSpaceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); const SubPixelAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const int block_size = param->block_size; const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "DepthToSpace only support input layouts that are convertible from NCHW." 
<< " But got " << in_layout; @@ -1085,18 +1085,18 @@ RELAY_REGISTER_OP("nn.depth_to_space") bool SpaceToDepthRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); const SubPixelAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const int block_size = param->block_size; const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "SpaceToDepth only support input layouts that are convertible from NCHW." << " But got " << in_layout; diff --git a/src/relay/op/nn/nn.h b/src/relay/op/nn/nn.h index e7f5a4b9d618..30ef3079e565 100644 --- a/src/relay/op/nn/nn.h +++ b/src/relay/op/nn/nn.h @@ -37,15 +37,15 @@ namespace relay { template bool DenseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr) return false; const AttrType* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK(static_cast(data->shape.size()) != 0); + ICHECK(static_cast(data->shape.size()) != 0); Array oshape = data->shape; if (param->units.defined()) { @@ -62,9 +62,9 @@ bool DenseRel(const Array& types, int num_inputs, const Attrs& attrs, } else { if (weight == nullptr) return false; Array wshape = weight->shape; - CHECK(static_cast(weight->shape.size()) == 2); + ICHECK(static_cast(weight->shape.size()) == 2); if (!data->shape.back().as()) { - CHECK(reporter->AssertEQ(data->shape[data->shape.size() - 1], weight->shape[1])) + ICHECK(reporter->AssertEQ(data->shape[data->shape.size() - 1], weight->shape[1])) << "DenseRel: input dimension doesn't match," << " data shape=" << data->shape << ", weight shape=" << weight->shape; } diff --git a/src/relay/op/nn/pad.cc b/src/relay/op/nn/pad.cc index 45447e155135..5b9988b101eb 100644 --- a/src/relay/op/nn/pad.cc +++ b/src/relay/op/nn/pad.cc @@ -55,8 +55,8 @@ Array> PadInferCorrectLayout(const Attrs& attrs, const Array> axis_pad_width; int index_counter = 0; - CHECK_EQ(new_in_layouts.size(), 1); - CHECK_EQ(old_in_layouts.size(), 1); + ICHECK_EQ(new_in_layouts.size(), 1); + ICHECK_EQ(old_in_layouts.size(), 1); for (auto iter_var : old_in_layouts[0]->axes) { const auto& old_layout_axis = LayoutAxis::Get(iter_var); axis_pad_width.emplace(old_layout_axis.name(), params->pad_width[index_counter]); @@ -75,7 +75,7 @@ Array> PadInferCorrectLayout(const Attrs& attrs, const Array> PadInferCorrectLayout(const Attrs& attrs, const Array> PadInferCorrectLayout(const Attrs& attrs, const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const PadAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // check that pad widths match lengths - CHECK(data->shape.size() == param->pad_width.size()) + ICHECK(data->shape.size() == param->pad_width.size()) << "There should be as many pad width pairs as shape dimensions " << "but the shape has " << data->shape.size() << " dimensions " << "and there are " << param->pad_width.size() << " pad width pairs."; @@ -130,19 +130,19 @@ bool 
PadRel(const Array& types, int num_inputs, const Attrs& attrs, // each pad width element should be a pair of positive integers std::vector oshape; for (size_t i = 0; i < param->pad_width.size(); i++) { - CHECK(param->pad_width[i].size() == 2) + ICHECK(param->pad_width[i].size() == 2) << "Each pad width element should be a pair but at index " << i << " there are " << param->pad_width[i].size() << " elements."; auto width1 = tir::as_const_int(param->pad_width[i][0]); auto width2 = tir::as_const_int(param->pad_width[i][1]); - CHECK(width1 != nullptr); - CHECK(width2 != nullptr); + ICHECK(width1 != nullptr); + ICHECK(width2 != nullptr); - CHECK(*width1 >= 0) << "Param width elements should be positive but first pad width at " - << "index " << i << " is " << *width1 << "."; - CHECK(*width2 >= 0) << "Param width elements should be positive but first pad width at " - << "index " << i << " is " << *width2 << "."; + ICHECK(*width1 >= 0) << "Param width elements should be positive but first pad width at " + << "index " << i << " is " << *width1 << "."; + ICHECK(*width2 >= 0) << "Param width elements should be positive but second pad width at " + << "index " << i << " is " << *width2 << "."; if (!data->shape[i].as()) { auto padding = tir::make_const(data->shape[i].dtype(), *width1 + *width2); @@ -159,10 +159,10 @@ bool PadRel(const Array& types, int num_inputs, const Attrs& attrs, Array PadCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto pad_width = param->pad_width; - CHECK(pad_width.size() == inputs[0].ndim() && pad_width[0].size() == 2) << "Illegal pad_width"; + ICHECK(pad_width.size() == inputs[0].ndim() && pad_width[0].size() == 2) << "Illegal pad_width"; Array pad_before; for (size_t i = 0; i < pad_width.size(); ++i) { pad_before.push_back(pad_width[i][0]); @@ -207,15 +207,15 @@ TVM_REGISTER_NODE_TYPE(MirrorPadAttrs); bool MirrorPadRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const MirrorPadAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // check that pad widths match lengths - CHECK(data->shape.size() == param->pad_width.size()) + ICHECK(data->shape.size() == param->pad_width.size()) << "There should be as many pad width pairs as shape dimensions " << "but the shape has " << data->shape.size() << " dimensions " << "and there are " << param->pad_width.size() << " pad width pairs."; @@ -223,19 +223,19 @@ bool MirrorPadRel(const Array& types, int num_inputs, const Attrs& attrs, // each pad width element should be a pair of positive integers std::vector oshape; for (size_t i = 0; i < param->pad_width.size(); i++) { - CHECK(param->pad_width[i].size() == 2) + ICHECK(param->pad_width[i].size() == 2) << "Each pad width element should be a pair but at index " << i << " there are " << param->pad_width[i].size() << " elements."; auto width1 = tir::as_const_int(param->pad_width[i][0]); auto width2 = tir::as_const_int(param->pad_width[i][1]); - CHECK(width1 != nullptr); - CHECK(width2 != nullptr); + ICHECK(width1 != nullptr); + ICHECK(width2 != nullptr); - CHECK(*width1 >= 0) << "Param width elements should be positive but first pad width at " - << "index " << i << " is " << *width1 << "."; - CHECK(*width2 >= 0) << "Param width elements should be positive but first pad width at " - <<
"index " << i << " is " << *width2 << "."; + ICHECK(*width1 >= 0) << "Param width elements should be positive but first pad width at " + << "index " << i << " is " << *width1 << "."; + ICHECK(*width2 >= 0) << "Param width elements should be positive but first pad width at " + << "index " << i << " is " << *width2 << "."; auto padding = tir::make_const(data->shape[i].dtype(), *width1 + *width2); oshape.push_back(data->shape[i] + padding); diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc index cee7b6456ce6..4fb1745d65aa 100644 --- a/src/relay/op/nn/pooling.cc +++ b/src/relay/op/nn/pooling.cc @@ -50,7 +50,7 @@ Array > PoolInferCorrectLayout(const Attrs& attrs, if (new_in_layouts.defined()) { // Set the pool with the new layout. - CHECK_EQ(new_in_layouts.size(), 1); + ICHECK_EQ(new_in_layouts.size(), 1); params->layout = new_in_layouts[0].name(); } @@ -61,20 +61,20 @@ Array > PoolInferCorrectLayout(const Attrs& attrs, template bool Pool2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const auto dshape = data->shape; - CHECK_GE(dshape.size(), 2U) + ICHECK_GE(dshape.size(), 2U) << "Pool2D only support input >= 2-D: input must have height and width"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && - !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && + !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". Pool2D layout must have H and W, which cannot be split"; const auto hidx = layout.IndexOf(LayoutAxis::Get('H')); @@ -131,21 +131,21 @@ Array Pool2DCompute(const Attrs& attrs, const Array& inp const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto pool_size = param->pool_size; auto strides = param->strides; auto padding = param->padding; auto ceil_mode = param->ceil_mode; Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCHW).defined()) << "max_pool2d currently only supports layouts that are convertible from NCHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "max_pool2d does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "max_pool2d does not support input split on width"; - CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) + ICHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) << "Pool2D only support 4-D input (e.g., NCHW)" << " or 5-D input (e.g. NCHWc on for vector instructions)" << " or 6-D input (e.g. 
NCHWnc for tensor accelerators)"; @@ -248,20 +248,20 @@ TVM_REGISTER_NODE_TYPE(GlobalPool2DAttrs); bool GlobalPool2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { return false; } const auto dshape = data->shape; - CHECK_GE(dshape.size(), 2U) + ICHECK_GE(dshape.size(), 2U) << "Pool2D only support input >= 2-D: input must have height and width"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && - !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && + !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". Pool2D layout must have H and W, which cannot be split"; const auto hidx = layout.IndexOf(LayoutAxis::Get('H')); @@ -280,16 +280,16 @@ Array GlobalPool2DCompute(const Attrs& attrs, const Array(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCHW).defined()) << "global_avg_pool2d currently only supports layouts that are convertible from NCHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "global_avg_pool2d does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "global_avg_pool2d does not support input split on width"; - CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) + ICHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) << "Pool2D only support 4-D input (e.g., NCHW)" << " or 5-D input (last dimension is a split of channel)"; return Array{topi::nn::global_pool(inputs[0], mode, layout.name())}; @@ -354,27 +354,27 @@ TVM_REGISTER_NODE_TYPE(AdaptivePool2DAttrs); bool AdaptivePool2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { return false; } const auto dshape = data->shape; - CHECK_GE(dshape.size(), 2U) + ICHECK_GE(dshape.size(), 2U) << "Pool2D only support input >= 2-D: input must have height and width"; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && - !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('H')) && layout.Contains(LayoutAxis::Get('W')) && + !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". 
Pool2D layout must have H and W, which cannot be split"; const auto hidx = layout.IndexOf(LayoutAxis::Get('H')); const auto widx = layout.IndexOf(LayoutAxis::Get('W')); Array oshape(dshape); auto output_size = param->output_size; - CHECK_LE(output_size.size(), 2U) << "output_size can have up to 2 elements."; + ICHECK_LE(output_size.size(), 2U) << "output_size can have up to 2 elements."; IndexExpr output_height, output_width; if (output_size.empty()) { output_height = dshape[hidx]; @@ -400,16 +400,16 @@ Array AdaptivePool2DCompute(const Attrs& attrs, const Array(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCHW).defined()) << "Adaptive pool2d currently only supports layouts that are convertible from NCHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "Adaptive pool2d does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "Adaptive pool2d does not support input split on width"; - CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) + ICHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) << "Pool2D only support 4-D input (e.g., NCHW)" << " or 5-D input (last dimension is a split of channel)"; @@ -505,21 +505,21 @@ TVM_REGISTER_NODE_TYPE(AdaptivePool3DAttrs); bool AdaptivePool3DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { return false; } const auto dshape = data->shape; - CHECK_GE(dshape.size(), 3U) + ICHECK_GE(dshape.size(), 3U) << "Pool3D only support input >= 3-D: input must have depth, height and width"; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('D')) && layout.Contains(LayoutAxis::Get('H')) && - layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('d')) && - !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('D')) && layout.Contains(LayoutAxis::Get('H')) && + layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('d')) && + !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". 
Pool3D layout must have D, H and W, which cannot be split"; @@ -528,7 +528,7 @@ bool AdaptivePool3DRel(const Array& types, int num_inputs, const Attrs& at const auto widx = layout.IndexOf(LayoutAxis::Get('W')); Array oshape(dshape); auto output_size = param->output_size; - CHECK_LE(output_size.size(), 3U) << "output_size can have up to 3 elements."; + ICHECK_LE(output_size.size(), 3U) << "output_size can have up to 3 elements."; IndexExpr output_depth, output_height, output_width; if (output_size.empty()) { output_depth = dshape[didx]; @@ -558,18 +558,18 @@ Array AdaptivePool3DCompute(const Attrs& attrs, const Array(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCDHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCDHW).defined()) << "Adaptive pool3d currently only supports layouts that are convertible from NCDHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('d')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('d')), -1) << "Adaptive pool3d does not support input split on depth"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "Adaptive pool3d does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "Adaptive pool3d does not support input split on width"; - CHECK(inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) + ICHECK(inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) << "Pool3D only support 5-D input (e.g., NCDHW)" << " or 6-D input (last dimension is a split of channel)"; @@ -666,7 +666,7 @@ RELAY_REGISTER_OP("nn.adaptive_avg_pool3d") bool Pool2DGradRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[1].as(); if (data == nullptr) return false; @@ -681,26 +681,26 @@ Array Pool2DGradCompute(const Attrs& attrs, const Array& const Type& out_type) { static const Layout kNCHW("NCHW"); const auto* param = attrs.as(); - CHECK(param != nullptr); - CHECK_EQ(inputs.size(), 2); + ICHECK(param != nullptr); + ICHECK_EQ(inputs.size(), 2); auto pool_size = param->pool_size; auto strides = param->strides; auto padding = param->padding; auto ceil_mode = param->ceil_mode; Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCHW).defined()) << "pool2d_grad currently only supports layouts that are convertible from NCHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "pool2d_grad does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "pool2d_grad does not support input split on width"; - CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) + ICHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) << "Pool2DGrad only support 4-D output gradient (e.g., NCHW)" << " or 5-D output gradient (last dimension is a split of channel)"; - CHECK(inputs[1].ndim() == 4U || inputs[1].ndim() == 5U) + ICHECK(inputs[1].ndim() == 4U || inputs[1].ndim() == 5U) << "Pool2DGrad only support 4-D input (e.g., NCHW)" << " or 5-D input (last dimension is a split of channel)"; @@ -823,18 +823,18 @@ TVM_REGISTER_NODE_TYPE(AvgPool1DAttrs); template bool Pool1DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { 
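// Validate the input rank and that the layout carries an unsplit W axis, then
// locate that axis so the pooled output width can be inferred.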
- CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const auto dshape = data->shape; - CHECK_GE(dshape.size(), 1U) << "Pool1D only support input >= 1-D: input must have width"; + ICHECK_GE(dshape.size(), 1U) << "Pool1D only support input >= 1-D: input must have width"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". Pool1D layout must have W, which cannot be split"; const auto widx = layout.IndexOf(LayoutAxis::Get('W')); @@ -873,19 +873,19 @@ Array Pool1DCompute(const Attrs& attrs, const Array& inp const Type& out_type) { static const Layout kNCW("NCW"); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto pool_size = param->pool_size; auto strides = param->strides; auto padding = param->padding; auto ceil_mode = param->ceil_mode; Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCW).defined()) << "max_pool1d currently only supports layouts that are convertible from NCW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "max_pool1d does not support input split on width"; - CHECK(inputs[0].ndim() == 3U || inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) + ICHECK(inputs[0].ndim() == 3U || inputs[0].ndim() == 4U || inputs[0].ndim() == 5U) << "Pool1D only support 3-D input (e.g., NCW)" << " or 4-D input (e.g. NCWc on for vector instructions)" << " or 5-D input (e.g. NCWnc for tensor accelerators)"; @@ -982,21 +982,21 @@ TVM_REGISTER_NODE_TYPE(AvgPool3DAttrs); template bool Pool3DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const auto dshape = data->shape; - CHECK_GE(dshape.size(), 3U) + ICHECK_GE(dshape.size(), 3U) << "Pool3D only support input >= 3-D: input must have depth, height and width"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout layout(param->layout); - CHECK(layout.Contains(LayoutAxis::Get('D')) && layout.Contains(LayoutAxis::Get('H')) && - layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('d')) && - !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) + ICHECK(layout.Contains(LayoutAxis::Get('D')) && layout.Contains(LayoutAxis::Get('H')) && + layout.Contains(LayoutAxis::Get('W')) && !layout.Contains(LayoutAxis::Get('d')) && + !layout.Contains(LayoutAxis::Get('h')) && !layout.Contains(LayoutAxis::Get('w'))) << "Invalid layout " << layout << ". 
Pool3D layout must have D, H and W, which cannot be split"; @@ -1051,23 +1051,23 @@ Array Pool3DCompute(const Attrs& attrs, const Array& inp const Type& out_type) { static const Layout kNCDHW("NCDHW"); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto pool_size = param->pool_size; auto strides = param->strides; auto padding = param->padding; auto ceil_mode = param->ceil_mode; Layout layout(param->layout); - CHECK(tir::BijectiveLayout(layout, kNCDHW).defined()) + ICHECK(tir::BijectiveLayout(layout, kNCDHW).defined()) << "max_pool3d currently only supports layouts that are convertible from NCDHW"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('d')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('d')), -1) << "max_pool3d does not support input split on depth"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('h')), -1) << "max_pool3d does not support input split on height"; - CHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) + ICHECK_EQ(layout.IndexOf(LayoutAxis::Get('w')), -1) << "max_pool3d does not support input split on width"; - CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) + ICHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U || inputs[0].ndim() == 6U) << "Pool3D only support 5-D input (e.g., NCDHW)" << " or 6-D input (e.g. NCDHWc on for vector instructions)" << " or 7-D input (e.g. NCDHWnc for tensor accelerators)"; diff --git a/src/relay/op/nn/sparse.cc b/src/relay/op/nn/sparse.cc index 3f51e1f8ab37..09dca09a82de 100644 --- a/src/relay/op/nn/sparse.cc +++ b/src/relay/op/nn/sparse.cc @@ -38,10 +38,10 @@ TVM_REGISTER_NODE_TYPE(SparseDenseAttrs); bool SparseDenseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const auto* data = types[0].as(); const auto* weight_data = types[1].as(); - CHECK(weight_data->shape.size() == 1 || weight_data->shape.size() == 3); + ICHECK(weight_data->shape.size() == 1 || weight_data->shape.size() == 3); const auto* weight_indptr = types[3].as(); if (data == nullptr) return false; @@ -131,11 +131,11 @@ TVM_REGISTER_NODE_TYPE(SparseTransposeAttrs); bool SparseTransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* sparse_data = types[0].as(); - CHECK_EQ(sparse_data->shape.size(), 1); + ICHECK_EQ(sparse_data->shape.size(), 1); const auto* sparse_indices = types[1].as(); - CHECK_EQ(sparse_indices->shape.size(), 1); + ICHECK_EQ(sparse_indices->shape.size(), 1); const auto* sparse_indptr = types[2].as(); std::vector output_types; diff --git a/src/relay/op/nn/upsampling.cc b/src/relay/op/nn/upsampling.cc index bdf3090cefad..3b0139b16b1b 100644 --- a/src/relay/op/nn/upsampling.cc +++ b/src/relay/op/nn/upsampling.cc @@ -42,18 +42,18 @@ TVM_REGISTER_NODE_TYPE(UpSampling3DAttrs); bool UpSamplingRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCHW("NCHW"); const UpSamplingAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "UpSampling 
only support input layouts that are convertible from NCHW." << " But got " << in_layout; @@ -110,18 +110,18 @@ RELAY_REGISTER_OP("nn.upsampling") // UpSampling3D bool UpSampling3DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; static const Layout kNCDHW("NCDHW"); const UpSampling3DAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Layout in_layout(param->layout); auto layout_converter = tir::BijectiveLayout(in_layout, kNCDHW); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "UpSampling3D only support input layouts that are convertible from NCDHW." << " But got " << in_layout; diff --git a/src/relay/op/nn/upsampling.h b/src/relay/op/nn/upsampling.h index e4e3bc9b1929..4cd292e78cb6 100644 --- a/src/relay/op/nn/upsampling.h +++ b/src/relay/op/nn/upsampling.h @@ -43,7 +43,7 @@ Array > UpsamplingInferCorrectLayout(const Attrs& attrs, T* params = const_cast(attrs.as()); if (new_in_layouts.defined()) { - CHECK_EQ(new_in_layouts.size(), 1); + ICHECK_EQ(new_in_layouts.size(), 1); Layout raw_layout(params->layout); Layout input = new_in_layouts[0]; diff --git a/src/relay/op/op_common.h b/src/relay/op/op_common.h index d530345fc9e8..6c2c6b2cce69 100644 --- a/src/relay/op/op_common.h +++ b/src/relay/op/op_common.h @@ -151,7 +151,7 @@ inline void GetPaddingWidth(const Array& padding, IndexExpr* pad_w) { } else if (padding.size() == 2) { *pad_w = padding[0] + padding[1]; } else { - CHECK_EQ(padding.size(), 4) << " Expected padding size of 1 or 2, found " << padding.size(); + ICHECK_EQ(padding.size(), 4) << " Expected padding size of 1 or 2, found " << padding.size(); } } @@ -168,7 +168,7 @@ inline void GetPaddingHeightWidth(const Array& padding, IndexExpr* pa *pad_h = padding[0] + padding[2]; *pad_w = padding[1] + padding[3]; } else { - CHECK_EQ(padding.size(), 4) << " Padding size should be 1, 2 or 4, but got " << padding.size(); + ICHECK_EQ(padding.size(), 4) << " Padding size should be 1, 2 or 4, but got " << padding.size(); } } @@ -188,7 +188,7 @@ inline void GetPaddingDepthHeightWidth(const Array& padding, IndexExp *pad_h = padding[1] + padding[4]; *pad_w = padding[2] + padding[5]; } else { - CHECK_EQ(padding.size(), 6) << " Padding size should be 1, 3 or 6, but got " << padding.size(); + ICHECK_EQ(padding.size(), 6) << " Padding size should be 1, 3 or 6, but got " << padding.size(); } } diff --git a/src/relay/op/tensor/binary.cc b/src/relay/op/tensor/binary.cc index df128ff05338..aafd4492fec4 100644 --- a/src/relay/op/tensor/binary.cc +++ b/src/relay/op/tensor/binary.cc @@ -34,7 +34,7 @@ namespace relay { #define RELAY_BINARY_COMPUTE(FTOPI) \ [](const Attrs& attrs, const Array& inputs, \ const Type& out_type) -> Array { \ - CHECK_EQ(inputs.size(), 2U); \ + ICHECK_EQ(inputs.size(), 2U); \ return {FTOPI(inputs[0], inputs[1])}; \ } diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc index 16f5f0116b60..afe45571f558 100644 --- a/src/relay/op/tensor/reduce.cc +++ b/src/relay/op/tensor/reduce.cc @@ -63,12 +63,12 @@ inline std::vector GetReduceAxes(const uint32_t indim, const Array= 0) << "Axis out of bounds in reduce operator."; - CHECK(axis < indim) << "Axis out of bounds in reduce operator."; + ICHECK(axis >= 0) << "Axis out of bounds in reduce operator."; + ICHECK(axis < indim) << "Axis out of bounds in reduce operator."; 
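// axis is now guaranteed to lie in [0, indim); record it.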
in_axes.push_back(axis); } - CHECK(in_axes[in_axes.size() - 1] < indim) + ICHECK(in_axes[in_axes.size() - 1] < indim) << "Reduction axis " << in_axes[in_axes.size() - 1] << " exceeds input dimensions " << indim; std::sort(in_axes.begin(), in_axes.end()); @@ -91,7 +91,7 @@ inline std::vector GetReduceAxes(const uint32_t indim, const Array GetExcludeAxes(size_t indim, const Array& inaxis) { - CHECK(inaxis.defined()) << "Cannot set exclude when axis=None"; + ICHECK(inaxis.defined()) << "Cannot set exclude when axis=None"; std::vector axis_flag(indim, true); for (auto i : inaxis) { int64_t axis = i->value; @@ -99,8 +99,8 @@ Array GetExcludeAxes(size_t indim, const Array& inaxis) { axis = axis + static_cast(indim); } // Check out of bounds error - CHECK_GE(axis, 0) << "Axis out of bounds in reduce operator."; - CHECK_LT(axis, static_cast(indim)) << "Axis out of bounds in reduce operator."; + ICHECK_GE(axis, 0) << "Axis out of bounds in reduce operator."; + ICHECK_LT(axis, static_cast(indim)) << "Axis out of bounds in reduce operator."; axis_flag[axis] = false; } @@ -125,7 +125,7 @@ Array> ReduceInferCorrectLayout(const Attrs& attrs, // Get the reduce axes. Array> old_in_shapes; for (auto old_in_t : old_in_types) { - CHECK(old_in_t.as()); + ICHECK(old_in_t.as()); old_in_shapes.push_back(old_in_t.as()->shape); } uint32_t indim = old_in_shapes[0].size(); @@ -135,8 +135,8 @@ Array> ReduceInferCorrectLayout(const Attrs& attrs, if (new_in_layouts.defined() && r_axes.size()) { // Adapt to new layout. The axis has to change. Record original reduce axes. Convert to the // modified layout axes. - CHECK_EQ(new_in_layouts.size(), 1); - CHECK_EQ(old_in_layouts.size(), 1); + ICHECK_EQ(new_in_layouts.size(), 1); + ICHECK_EQ(old_in_layouts.size(), 1); // 1) Collect the original axes std::unordered_set old_r_dims; @@ -166,7 +166,7 @@ Array> ReduceInferCorrectLayout(const Attrs& attrs, params->axis = new_r_axes; } else if (old_in_layouts.defined()) { // If the new layout is undefined, set the old layout as the inferred layout. 
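// (Reduce ops take a single data tensor, so exactly one old layout is expected.)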
- CHECK_EQ(old_in_layouts.size(), 1); + ICHECK_EQ(old_in_layouts.size(), 1); ret = old_in_layouts[0]; } @@ -177,7 +177,7 @@ template Array ReduceCompute(const Attrs& attrs, const Array& inputs, const Type& out_type, F f) { const ReduceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); if (inputs[0]->shape.size() == 0) { return {topi::identity(inputs[0])}; } @@ -221,8 +221,8 @@ inline std::vector ReduceShapeImpl(const std::vector& in_s } if (is_dynamic_input) { - CHECK(reporter->Assert(max_shape < - tir::make_const(DataType::Int(64), std::numeric_limits::max()))) + ICHECK(reporter->Assert( + max_shape < tir::make_const(DataType::Int(64), std::numeric_limits::max()))) << "The maximum possible index of reduced shape cannot be more than int32 max."; } @@ -259,14 +259,14 @@ inline std::vector ReduceShapeImpl(const std::vector& in_s */ bool ArgReduceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; - CHECK(static_cast(data->shape.size()) != 0); + ICHECK(static_cast(data->shape.size()) != 0); std::vector in_shape(data->shape.begin(), data->shape.end()); const ReduceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // assign output type and shape auto oshape = ReduceShapeImpl(in_shape, param, reporter); @@ -283,13 +283,13 @@ bool ArgReduceRel(const Array& types, int num_inputs, const Attrs& attrs, */ bool ReduceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; std::vector in_shape(data->shape.begin(), data->shape.end()); const ReduceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // assign output type and shape auto oshape = ReduceShapeImpl(in_shape, param, reporter); @@ -501,7 +501,7 @@ Array MeanCompute(const Attrs& attrs, const Array& input const Type& out_type) { IndexExpr count = tir::make_const(inputs[0]->dtype, 1); const ReduceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto axes = param->axis; for (int64_t i : GetReduceAxes(inputs[0]->shape.size(), param->axis, param->exclude)) { count *= inputs[0]->shape[i]; @@ -537,19 +537,19 @@ Example:: bool VarianceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) return false; - CHECK(static_cast(data->shape.size()) != 0); + ICHECK(static_cast(data->shape.size()) != 0); const auto* mean = types[1].as(); if (mean == nullptr) return false; std::vector in_shape(data->shape.begin(), data->shape.end()); std::vector mean_shape(mean->shape.begin(), mean->shape.end()); - CHECK_EQ(in_shape.size(), mean_shape.size()); + ICHECK_EQ(in_shape.size(), mean_shape.size()); const VarianceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // assign output type and shape auto oshape = ReduceShapeImpl(in_shape, param, reporter); @@ -561,7 +561,7 @@ Array VarianceCompute(const Attrs& attrs, const Array& i const Type& out_type) { IndexExpr count = tir::make_const(inputs[0]->dtype, 1); const VarianceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto axes = param->axis; bool unbiased = 
param->unbiased; auto data = inputs[0]; @@ -576,7 +576,7 @@ Array VarianceCompute(const Attrs& attrs, const Array& i auto sq_diff = topi::power(topi::subtract(data, mean), 2); if (param->exclude) { axes = GetExcludeAxes(sq_diff->shape.size(), param->axis); - CHECK_NE(axes.size(), 0); + ICHECK_NE(axes.size(), 0); } auto var = topi::divide(topi::sum(sq_diff, axes, param->keepdims, false), count); diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 6dda0e936e6a..68582b4918fe 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -55,10 +55,10 @@ TVM_REGISTER_NODE_TYPE(CastAttrs); bool CastRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "cast: expect input type to be TensorType but get " << types[0]; return false; } @@ -70,7 +70,7 @@ bool CastRel(const Array& types, int num_inputs, const Attrs& attrs, Array CastCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const CastAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); DataType dtype = param->dtype; return {topi::cast(inputs[0], dtype)}; } @@ -100,16 +100,16 @@ RELAY_REGISTER_OP("cast") // relay.cast_like bool CastLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "cast: expect input type to be TensorType but get " << types[0]; return false; } const auto* dtype_like = types[1].as(); if (dtype_like == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "cast: expect input type to be TensorType but get " << types[1]; return false; } @@ -144,7 +144,7 @@ RELAY_REGISTER_OP("cast_like") Array ReinterpretCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const CastAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); DataType dtype = param->dtype; return {topi::reinterpret(inputs[0], dtype)}; } @@ -178,10 +178,10 @@ TVM_REGISTER_NODE_TYPE(ExpandDimsAttrs); bool ExpandDimsRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "expand_dims: expect input type to be TensorType but get " << types[0]; return false; } @@ -189,9 +189,9 @@ bool ExpandDimsRel(const Array& types, int num_inputs, const Attrs& attrs, const int ndim = static_cast(data->shape.size()); const int axis = param->axis; const int num_newaxis = param->num_newaxis; - CHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`" - << ", but got num_newaxis = " << num_newaxis; - CHECK(-ndim - 1 <= axis && axis <= ndim) + ICHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`" + << ", but got num_newaxis = " << num_newaxis; + ICHECK(-ndim - 1 <= axis && axis <= ndim) << "expand_dims only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; const int pivot = axis < 0 ? 
ndim + axis + 1 : axis; @@ -213,7 +213,7 @@ bool ExpandDimsRel(const Array& types, int num_inputs, const Attrs& attrs, Array ExpandDimsCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const ExpandDimsAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::expand_dims(inputs[0], param->axis, param->num_newaxis)}; } @@ -247,7 +247,7 @@ TVM_REGISTER_NODE_TYPE(ConcatenateAttrs); Array ConcatenateCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const ConcatenateAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::concatenate(inputs, param->axis)}; } @@ -282,10 +282,10 @@ TVM_REGISTER_NODE_TYPE(StackAttrs); bool StackRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* tensor_tuple = types[0].as(); if (tensor_tuple == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "cast: expect input type to be TupleType but get " << types[0]; return false; } @@ -295,7 +295,7 @@ bool StackRel(const Array& types, int num_inputs, const Attrs& attrs, // Sanity check: axis int axis = param->axis; - CHECK(-(ndim + 1) <= axis && axis < ndim + 1) + ICHECK(-(ndim + 1) <= axis && axis < ndim + 1) << "stack only accepts `axis` in [-(ndim+1), ndim+1)" << ", but got axis = " << axis << ", and ndim = " << ndim; axis = axis < 0 ? ndim + axis + 1 : axis; @@ -306,8 +306,8 @@ bool StackRel(const Array& types, int num_inputs, const Attrs& attrs, const auto& e = Downcast(ele); int e_ndim = static_cast(e->shape.size()); const DataType& e_dtype = e->dtype; - CHECK_EQ(e_ndim, ndim) << "relay.stack requires all tensors have the same ndim"; - CHECK_EQ(e_dtype, dtype) << "relay.stack requires all tensors have the same dtype"; + ICHECK_EQ(e_ndim, ndim) << "relay.stack requires all tensors have the same ndim"; + ICHECK_EQ(e_dtype, dtype) << "relay.stack requires all tensors have the same dtype"; for (size_t j = 0; j < first->shape.size(); ++j) { if (j == static_cast(axis)) continue; if (first->shape[j].as() || e->shape[j].as() || @@ -337,7 +337,7 @@ bool StackRel(const Array& types, int num_inputs, const Attrs& attrs, Array StackCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const StackAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::stack(inputs, param->axis)}; } @@ -372,10 +372,10 @@ TVM_REGISTER_NODE_TYPE(TransposeAttrs); bool TransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "transpose: expect input type to be TensorType but get " << types[0]; return false; } @@ -383,7 +383,7 @@ bool TransposeRel(const Array& types, int num_inputs, const Attrs& attrs, const int ndim = data->shape.size(); const Array& axes = param->axes; // check dimension match - CHECK(!axes.defined() || static_cast(axes.size()) == ndim) + ICHECK(!axes.defined() || static_cast(axes.size()) == ndim) << "Dimension mismatch: axes has " << axes.size() << " elements" << ", but data.ndim = " << ndim; // construct int_axes @@ -399,12 +399,12 @@ bool TransposeRel(const Array& types, int num_inputs, const Attrs& attrs, for (const Integer& e : axes) { int64_t axis = e; // sanity check for axis 
and ndim - CHECK(-ndim <= axis && axis < ndim) + ICHECK(-ndim <= axis && axis < ndim) << "transpose only allows each `axis` in `axes` in range [-data.ndim, data.ndim)" << ", but got axis = " << axis << ", and data.ndim = " << ndim; axis = axis < 0 ? axis + ndim : axis; // sanity check for duplication - CHECK(!axis_used[axis]) << "Duplicate axes in transpose: " << axis; + ICHECK(!axis_used[axis]) << "Duplicate axes in transpose: " << axis; axis_used[axis] = 1; int_axes.push_back(static_cast(axis)); } @@ -421,7 +421,7 @@ bool TransposeRel(const Array& types, int num_inputs, const Attrs& attrs, Array TransposeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{topi::transpose(inputs[0], param->axes)}; } @@ -480,13 +480,13 @@ Array infer_newshape(const Array& data_shape, const Attrs& ++src_idx; } else if (svalue == 0) { // keep same - CHECK_LT(src_idx, ishape.size()); + ICHECK_LT(src_idx, ishape.size()); used_input_dims.insert(src_idx); used_output_dims.insert(oshape.size()); oshape.push_back(ishape[src_idx++]); } else if (svalue == -1) { // inference based on rest - CHECK_LT(infer_idx, 0) << "One and only one dim can be inferred"; + ICHECK_LT(infer_idx, 0) << "One and only one dim can be inferred"; infer_idx = i; oshape.push_back(1); ++src_idx; @@ -499,7 +499,7 @@ Array infer_newshape(const Array& data_shape, const Attrs& } } else if (svalue == -3) { // merge two dims from source - CHECK_LT(src_idx + 1, ishape.size()); + ICHECK_LT(src_idx + 1, ishape.size()); used_input_dims.insert(src_idx); IndexExpr d1 = ishape[src_idx++]; used_input_dims.insert(src_idx); @@ -513,14 +513,14 @@ Array infer_newshape(const Array& data_shape, const Attrs& } else if (svalue == -4) { // split the source dim s into two dims // read the left dim and then the right dim (either can be -1) - CHECK_LT(i + 2, newshape.size()); - CHECK_LT(src_idx, ishape.size()); + ICHECK_LT(i + 2, newshape.size()); + ICHECK_LT(src_idx, ishape.size()); used_input_dims.insert(src_idx); IndexExpr d0 = ishape[src_idx++]; Integer d1 = newshape[++i]; Integer d2 = newshape[++i]; if (d1->value == -1) { - CHECK_NE(d2->value, -1) << "Split dims cannot both be -1."; + ICHECK_NE(d2->value, -1) << "Split dims cannot both be -1."; used_output_dims.insert(oshape.size()); if (d0.as()) { oshape.push_back(Any()); @@ -584,10 +584,10 @@ bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { const auto* param = attrs.as(); // types: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "reshape: expect input type to be TensorType but get " << types[0]; return false; } @@ -627,7 +627,7 @@ bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, data_shape_sum *= Downcast(x)->value; } if (!found_dynamic) { - CHECK_EQ(oshape_sum, data_shape_sum) + ICHECK_EQ(oshape_sum, data_shape_sum) << "Input tensor shape and reshaped shape are not compatible"; } @@ -648,7 +648,7 @@ Array ReshapeCompute(const Attrs& attrs, const Array& in } const auto* out_ttype = out_type.as(); - CHECK(out_ttype != nullptr); + ICHECK(out_ttype != nullptr); Array newshape; bool newshape_has_any = false; for (auto val : out_ttype->shape) { @@ -745,7 +745,7 @@ Example:: */ bool ReshapeLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - 
CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { return false; @@ -763,7 +763,7 @@ bool ReshapeLikeRel(const Array& types, int num_inputs, const Attrs& attrs } } if (is_static_shape) { - CHECK(reporter->AssertEQ(data->Size(), reshape_like->Size())) + ICHECK(reporter->AssertEQ(data->Size(), reshape_like->Size())) << "Reshape inputs size should be compatible."; } reporter->Assign(types[2], TensorType(reshape_like->shape, data->dtype)); @@ -795,7 +795,7 @@ the input array into an output array with the same shape as the second input arr // ArgWhere bool ArgWhereRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 1); + ICHECK_EQ(num_inputs, 1); auto tt = types[0].as(); if (tt == nullptr) { @@ -832,8 +832,8 @@ TVM_REGISTER_NODE_TYPE(ScatterAttrs); // Scatter bool ScatterRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 3); - CHECK_EQ(types.size(), 4); + ICHECK_EQ(num_inputs, 3); + ICHECK_EQ(types.size(), 4); auto data = types[0].as(); if (data == nullptr) { return false; @@ -846,9 +846,9 @@ bool ScatterRel(const Array& types, int num_inputs, const Attrs& attrs, if (updates == nullptr) { return false; } - CHECK(indices->dtype.is_int()) << "indices of take must be tensor of integer"; + ICHECK(indices->dtype.is_int()) << "indices of scatter must be tensor of integer"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); reporter->Assign(types[3], TensorType(data->shape, data->dtype)); return true; } @@ -879,8 +879,8 @@ TVM_REGISTER_NODE_TYPE(ScatterAddAttrs); // Scatter Add bool ScatterAddRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 3); - CHECK_EQ(types.size(), 4); + ICHECK_EQ(num_inputs, 3); + ICHECK_EQ(types.size(), 4); auto data = types[0].as(); if (data == nullptr) { return false; @@ -893,9 +893,9 @@ bool ScatterAddRel(const Array& types, int num_inputs, const Attrs& attrs, if (updates == nullptr) { return false; } - CHECK(indices->dtype.is_int()) << "indices of scatter_add must be tensor of integer"; + ICHECK(indices->dtype.is_int()) << "indices of scatter_add must be tensor of integer"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); reporter->Assign(types[3], TensorType(data->shape, data->dtype)); return true; } @@ -926,7 +926,7 @@ TVM_REGISTER_NODE_TYPE(TakeAttrs); bool TakeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, indices, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { return false; } @@ -935,9 +935,9 @@ bool TakeRel(const Array& types, int num_inputs, const Attrs& attrs, if (indices == nullptr) { return false; } - CHECK(indices->dtype.is_int()) << "indices of take must be tensor of integer"; + ICHECK(indices->dtype.is_int()) << "indices of take must be tensor of integer"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); if (!param->axis.defined()) { std::vector oshape(indices->shape.begin(), indices->shape.end()); @@ -950,8 +950,8 @@ bool TakeRel(const Array& types, int num_inputs, const Attrs& attrs, const auto ndim_data = static_cast(data->shape.size()); const auto ndim_indices = static_cast(indices->shape.size()); int axis = static_cast(param->axis->value); if (axis < 0) axis += ndim_data; - CHECK_LE(axis, ndim_data) << "axis
should be with in data shape" - << ", but got = " << axis; + ICHECK_LE(axis, ndim_data) << "axis should be within data shape" + << ", but got = " << axis; oshape.reserve(ndim_data - 1 + ndim_indices); for (int i = 0; i < axis; ++i) { @@ -971,7 +971,7 @@ bool TakeRel(const Array& types, int num_inputs, const Attrs& attrs, Array TakeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); if (!param->axis.defined()) { return Array{topi::take(inputs[0], inputs[1], param->mode)}; } else { @@ -1026,7 +1026,7 @@ TVM_REGISTER_NODE_TYPE(InitOpAttrs); bool FullRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const InitOpAttrs* param = attrs.as(); const auto* fill_value = types[0].as(); if (fill_value == nullptr) { @@ -1038,7 +1038,7 @@ bool FullRel(const Array& types, int num_inputs, const Attrs& attrs, out_dtype = fill_value->dtype; } - CHECK_EQ(fill_value->shape.size(), 0) + ICHECK_EQ(fill_value->shape.size(), 0) << "Fill value should be a scalar but has dimension " << fill_value->shape.size() << "."; std::vector oshape; @@ -1081,10 +1081,10 @@ RELAY_REGISTER_OP("full") bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [ret_type] - CHECK_EQ(types.size(), 1); + ICHECK_EQ(types.size(), 1); const InitOpAttrs* param = attrs.as(); - CHECK(param); + ICHECK(param); DataType out_dtype = param->dtype; std::vector oshape; @@ -1137,7 +1137,7 @@ RELAY_REGISTER_OP("ones") bool FullLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { return false; } @@ -1147,7 +1147,7 @@ bool FullLikeRel(const Array& types, int num_inputs, const Attrs& attrs, return false; } - CHECK_EQ(fill_value->shape.size(), 0) + ICHECK_EQ(fill_value->shape.size(), 0) << "The fill value should be a scalar but here it has dimension " << fill_value->shape.size() << "."; @@ -1185,7 +1185,7 @@ TVM_REGISTER_NODE_TYPE(ArangeAttrs); bool ArangeRel(const Array& types, int num_inputs, const Attrs& raw_attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const ArangeAttrs* attrs = raw_attrs.as(); const ConstantNode *cstart, *cstop, *cstep; @@ -1199,8 +1199,8 @@ bool ArangeRel(const Array& types, int num_inputs, const Attrs& raw_attrs, double stop = ToScalar(cstop->data); double step = ToScalar(cstep->data); int32_t num_elem = static_cast(std::ceil((stop - start) / step)); - CHECK_GT(num_elem, 0) << "Invalid arange attributes (start, stop, step): " << attrs->start - << ", " << attrs->stop << ", " << attrs->step; + ICHECK_GT(num_elem, 0) << "Invalid arange attributes (start, stop, step): " << attrs->start + << ", " << attrs->stop << ", " << attrs->step; reporter->Assign(types[3], TensorType({num_elem}, attrs->dtype)); return true; } else { @@ -1225,7 +1225,7 @@ inline te::Tensor DynamicArange(const te::Tensor& start, const te::Tensor& stop, Array ArangeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const ArangeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); te::Tensor start = inputs[0]; te::Tensor stop = inputs[1]; te::Tensor step = inputs[2]; @@ -1276,10 +1276,10 @@ TVM_REGISTER_NODE_TYPE(RepeatAttrs); bool RepeatRel(const Array&
types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "repeat: expect input type to be TensorType but get " << types[0]; return false; } @@ -1287,9 +1287,9 @@ bool RepeatRel(const Array& types, int num_inputs, const Attrs& attrs, const int ndim = static_cast(data->shape.size()); const int repeats = param->repeats; const int axis = param->axis; - CHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" - << ", but got repeats = " << repeats; - CHECK(-ndim - 1 <= axis && axis <= ndim) + ICHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" + << ", but got repeats = " << repeats; + ICHECK(-ndim - 1 <= axis && axis <= ndim) << "repeat only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; const int pivot = axis < 0 ? ndim + axis : axis; @@ -1313,7 +1313,7 @@ bool RepeatRel(const Array& types, int num_inputs, const Attrs& attrs, Array RepeatCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const RepeatAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::repeat(inputs[0], param->repeats, param->axis)}; } @@ -1347,7 +1347,7 @@ TVM_REGISTER_NODE_TYPE(MeshgridAttrs); bool MeshgridRel(const Array& types, int num_inputs, const Attrs& raw_attrs, const TypeReporter& reporter) { // types: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const MeshgridAttrs* attrs = raw_attrs.as(); const auto* tensor_tuple = types[0].as(); if (tensor_tuple == nullptr) { @@ -1403,7 +1403,7 @@ bool MeshgridRel(const Array& types, int num_inputs, const Attrs& raw_attr Array MeshgridCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const MeshgridAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::meshgrid(inputs, param->indexing)}; } @@ -1434,10 +1434,10 @@ TVM_REGISTER_NODE_TYPE(TileAttrs); bool TileRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "tile: expect input type to be TensorType but get " << types[0]; return false; } @@ -1445,12 +1445,12 @@ bool TileRel(const Array& types, int num_inputs, const Attrs& attrs, const size_t ndim = data->shape.size(); const Array& reps = param->reps; // check dimension match - CHECK(reps.defined()) << "repetition array is not defined. data.ndim = " << ndim; + ICHECK(reps.defined()) << "repetition array is not defined. data.ndim = " << ndim; const size_t rndim = reps.size(); for (size_t i = 0; i < rndim; ++i) { if (const tvm::tir::IntImmNode* val = reps[i].as()) { - CHECK_GT(val->value, 0) << "Tile reps value should always be larger than 0, but get: " - << val->value; + ICHECK_GT(val->value, 0) << "Tile reps value should always be larger than 0, but get: " + << val->value; } } size_t tndim = (ndim > rndim) ? 
ndim : rndim; @@ -1502,7 +1502,7 @@ bool TileRel(const Array& types, int num_inputs, const Attrs& attrs, Array TileCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const TileAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::tile(inputs[0], param->reps)}; } @@ -1535,17 +1535,17 @@ TVM_REGISTER_NODE_TYPE(ReverseAttrs); bool ReverseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "reverse: expect input type to be TensorType but get " << types[0]; return false; } const auto* param = attrs.as(); const int ndim = static_cast(data->shape.size()); const int axis = param->axis; - CHECK(-ndim <= axis && axis < ndim) + ICHECK(-ndim <= axis && axis < ndim) << "reverse only accepts `axis` in [-data.ndim, data.ndim - 1]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; reporter->Assign(types[1], types[0]); @@ -1555,7 +1555,7 @@ bool ReverseRel(const Array& types, int num_inputs, const Attrs& attrs, Array ReverseCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const ReverseAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // pass empty seq_length tensor to reverse_sequence return {topi::reverse_sequence(inputs[0], te::Tensor(), param->axis)}; } @@ -1589,44 +1589,44 @@ TVM_REGISTER_NODE_TYPE(ReverseSequenceAttrs); bool ReverseSequenceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, seq_lengths, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "reverse_sequence: expect input type to be TensorType but get " << types[0]; return false; } const auto* seq_lengths = types[1].as(); if (seq_lengths == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "reverse_sequence: expect input type to be TensorType but get " << types[1]; return false; } const int seq_lengths_dim = static_cast(seq_lengths->shape.size()); - CHECK(seq_lengths_dim == 1) << "For reverse_sequnece, seq_lengths must be a 1D vector"; - CHECK(seq_lengths->dtype.is_int()) + ICHECK(seq_lengths_dim == 1) << "For reverse_sequence, seq_lengths must be a 1D vector"; + ICHECK(seq_lengths->dtype.is_int()) << "For reverse_sequnece, seq_lengths must be tensor of integer"; const auto* param = attrs.as(); const int ndim = static_cast(data->shape.size()); int batch_axis = param->batch_axis; - CHECK(-ndim <= batch_axis && batch_axis < ndim) + ICHECK(-ndim <= batch_axis && batch_axis < ndim) << "reverse_sequence only accepts `batch_axis` in [-data.ndim, data.ndim - 1]" << ", but got batch_axis = " << batch_axis << ", and data.ndim = " << ndim; if (batch_axis < 0) { batch_axis = static_cast(data->shape.size()) + batch_axis; } - CHECK(reporter->Assert(seq_lengths->shape[0] == data->shape[batch_axis])) + ICHECK(reporter->Assert(seq_lengths->shape[0] == data->shape[batch_axis])) << "For reverse_sequnece seq_lengths size should match with dimension of batch axis" << ", but got dimension of batch_axis = " << data->shape[batch_axis] << ", and seq_length size = " << seq_lengths->shape[0]; const int seq_axis = param->seq_axis; - CHECK(-ndim <= seq_axis && seq_axis < ndim) + ICHECK(-ndim <=
seq_axis && seq_axis < ndim) << "reverse_sequnece only accepts `seq_axis` in [-data.ndim, data.ndim - 1]" << ", but got seq_axis = " << seq_axis << ", and data.ndim = " << ndim; @@ -1637,7 +1637,7 @@ bool ReverseSequenceRel(const Array& types, int num_inputs, const Attrs& a Array ReverseSequenceCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const ReverseSequenceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::reverse_sequence(inputs[0], inputs[1], param->seq_axis, param->batch_axis)}; } @@ -1676,7 +1676,7 @@ Input is first sliced along batch axis and then elements are reversed along seq // where operator bool WhereRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4U); + ICHECK_EQ(types.size(), 4U); const auto* condition = types[0].as(); const auto* x = types[1].as(); const auto* y = types[2].as(); @@ -1688,18 +1688,18 @@ bool WhereRel(const Array& types, int num_inputs, const Attrs& attrs, const auto& cond_shape = condition->shape; const auto& x_shape = x->shape; const auto& y_shape = y->shape; - CHECK(x_shape.size() == y_shape.size()) << "x and y must have the same size"; + ICHECK(x_shape.size() == y_shape.size()) << "x and y must have the same size"; if (cond_shape.size() != x_shape.size()) { - CHECK_EQ(cond_shape.size(), 1) << "Shape of condition " << condition->shape - << " must be either equal to x or has dimension of 1."; + ICHECK_EQ(cond_shape.size(), 1) << "Shape of condition " << condition->shape + << " must be either equal to x or has dimension of 1."; } for (size_t i = 0; i < x_shape.size(); i++) { - CHECK(reporter->AssertEQ(x_shape[i], y_shape[i])) + ICHECK(reporter->AssertEQ(x_shape[i], y_shape[i])) << "x and y must have the same shape: " << x_shape << " vs " << y_shape; if (i < cond_shape.size()) { - CHECK(reporter->AssertEQ(cond_shape[i], x_shape[i])) + ICHECK(reporter->AssertEQ(cond_shape[i], x_shape[i])) << "condition and x must have the same shape: " << cond_shape << " vs " << x_shape; } } @@ -1783,13 +1783,13 @@ TVM_REGISTER_GLOBAL("relay.op._make.squeeze").set_body_typed(MakeSqueeze); bool SqueezeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) { return false; } const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); std::vector result_shape; // if axes is None, squeeze all axes of dimension 1 if (!param->axis.defined()) { @@ -1798,7 +1798,7 @@ bool SqueezeRel(const Array& types, int num_inputs, const Attrs& attrs, LOG(FATAL) << "axis needs to be defined for dynamic input."; } const int64_t* axis_ptr = tir::as_const_int(e); - CHECK(axis_ptr != nullptr) << "the axes attribute must be concrete"; + ICHECK(axis_ptr != nullptr) << "the axes attribute must be concrete"; if (*axis_ptr != 1) { result_shape.push_back(e); } @@ -1814,8 +1814,8 @@ bool SqueezeRel(const Array& types, int num_inputs, const Attrs& attrs, if (axis_val < 0) { axis_val += static_cast(original_shape.size()); } - CHECK_GE(axis_val, 0); - CHECK_LT(axis_val, original_shape.size()); + ICHECK_GE(axis_val, 0); + ICHECK_LT(axis_val, original_shape.size()); original_shape.at(axis_val).second = false; } for (const auto& p : original_shape) { @@ -1823,7 +1823,7 @@ bool SqueezeRel(const Array& types, int num_inputs, const Attrs& attrs, result_shape.push_back(p.first); } else { if (const 
int64_t* axis_ptr = tir::as_const_int(p.first)) { - CHECK_EQ(*axis_ptr, 1) << "cannot squeeze axis with dimension not equal to 1"; + ICHECK_EQ(*axis_ptr, 1) << "cannot squeeze axis with dimension not equal to 1"; } } } @@ -1835,7 +1835,7 @@ bool SqueezeRel(const Array& types, int num_inputs, const Attrs& attrs, Array SqueezeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const SqueezeAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::squeeze(inputs[0], param->axis)}; } @@ -1856,7 +1856,7 @@ RELAY_REGISTER_OP("squeeze") // CollapseSumLike: -> B where BroadCast(A, B) = A bool CollapseSumLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); reporter->Assign(types[2], types[1]); return BroadcastRel({types[0], types[1], types[0]}, 2, Attrs(), reporter); } @@ -1869,7 +1869,7 @@ Expr MakeCollapseSumLike(Expr data, Expr collapse_type) { Array CollapseSumLikeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* out_ttype = out_type.as(); - CHECK(out_ttype != nullptr); + ICHECK(out_ttype != nullptr); return {topi::collapse_sum(inputs[0], out_ttype->shape)}; } @@ -1889,14 +1889,14 @@ RELAY_REGISTER_OP("collapse_sum_like") // CollapseSumTo: -> B where Broadcast(A, B) = A bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const InitOpAttrs* param = attrs.as(); const auto* target_shape = types[1].as(); DataType out_dtype = types[0].as()->dtype; const IntImmNode* rank = target_shape->shape[0].as(); - CHECK(rank) << "Parameter must have static rank"; + ICHECK(rank) << "Parameter must have static rank"; std::vector oshape; if (param->shape) { @@ -1938,10 +1938,10 @@ RELAY_REGISTER_OP("collapse_sum_to") bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, ret_type], broadcast_to_type is in attrs bc static - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const InitOpAttrs* param = attrs.as(); - CHECK(param); + ICHECK(param); DataType out_dtype = types[0].as()->dtype; std::vector oshape; @@ -1983,7 +1983,7 @@ RELAY_REGISTER_OP("broadcast_to") // BroadCastToLike: -> B where BroadCast(A, B) = B bool BroadCastToLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); reporter->Assign(types[2], types[1]); return BroadcastRel({types[0], types[1], types[1]}, 2, Attrs(), reporter); } @@ -1996,7 +1996,7 @@ Expr MakeBroadCastToLike(Expr data, Expr broadcast_type) { Array BroadCastToLikeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* out_ttype = out_type.as(); - CHECK(out_ttype != nullptr); + ICHECK(out_ttype != nullptr); return {topi::broadcast_to(inputs[0], out_ttype->shape)}; } @@ -2016,7 +2016,7 @@ RELAY_REGISTER_OP("broadcast_to_like") // Adapter function to make int array. 
Array GetIntArray(Array arr) { for (size_t i = 0; i < arr.size(); ++i) { - CHECK(!arr[i].defined() || arr[i].as()) << "Expect an int array"; + ICHECK(!arr[i].defined() || arr[i].as()) << "Expect an int array"; } return Downcast>(arr); } @@ -2026,7 +2026,7 @@ TVM_REGISTER_NODE_TYPE(StridedSliceAttrs); bool StridedSliceRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const StridedSliceAttrs* param = attrs.as(); if (param == nullptr) { return false; @@ -2047,7 +2047,7 @@ bool StridedSliceRel(const Array& types, int num_inputs, const Attrs& attr std::vector stride_vec(num_axis, 1); if (param->slice_mode == "end") { for (size_t i = 0; i < param->strides.value().size(); ++i) { - CHECK(param->strides.value()[i].defined()); + ICHECK(param->strides.value()[i].defined()); stride_vec[i] = param->strides.value()[i]->value; } } @@ -2111,14 +2111,14 @@ bool StridedSliceRel(const Array& types, int num_inputs, const Attrs& attr int64_t slice_range, step; if (stride_v < 0) { if (end_v < -1) end_v = -1; - CHECK_LE(end_v, begin_v) << "strided_slice get empty slice at axis " << i; + ICHECK_LE(end_v, begin_v) << "strided_slice get empty slice at axis " << i; begin_v = std::min(dim_size - 1, begin_v); slice_range = begin_v - end_v; step = -stride_v; } else { if (begin_v < 0) begin_v = 0; - CHECK_GE(stride_v, 0); - CHECK_LE(begin_v, end_v) << "strided_slice get invalid slice at axis " << i; + ICHECK_GE(stride_v, 0); + ICHECK_LE(begin_v, end_v) << "strided_slice get invalid slice at axis " << i; end_v = std::min(dim_size, end_v); slice_range = end_v - begin_v; step = stride_v; @@ -2126,9 +2126,9 @@ bool StridedSliceRel(const Array& types, int num_inputs, const Attrs& attr oshape[i] = tir::make_const(dshape[i].dtype(), (slice_range + step - 1) / step); } } else { - CHECK(param->begin) << "strided_slice recieved invalid begin " << param->begin; - CHECK(param->end) << "strided_slice recieved invalid end " << param->end; - CHECK(param->strides) << "strided_slice recieved invalid strides " << param->strides; + ICHECK(param->begin) << "strided_slice received invalid begin " << param->begin; + ICHECK(param->end) << "strided_slice received invalid end " << param->end; + ICHECK(param->strides) << "strided_slice received invalid strides " << param->strides; } reporter->Assign(types[1], TensorType(oshape, data->dtype)); return true; @@ -2140,37 +2140,37 @@ Array> StridedSliceInferCorrectLayout(const Attrs& attrs, const Array& old_in_types) { Array> old_in_shapes; for (auto old_in_t : old_in_types) { - CHECK(old_in_t.as()); + ICHECK(old_in_t.as()); old_in_shapes.push_back(old_in_t.as()->shape); } - CHECK(old_in_layouts.defined()); - CHECK_GE(old_in_layouts.size(), 1); - CHECK(old_in_shapes.defined()); - CHECK_GE(old_in_shapes.size(), 1); + ICHECK(old_in_layouts.defined()); + ICHECK_GE(old_in_layouts.size(), 1); + ICHECK(old_in_shapes.defined()); + ICHECK_GE(old_in_shapes.size(), 1); auto layout = old_in_layouts[0]; if (layout.defined() && new_in_layouts.defined()) { - CHECK_GE(new_in_layouts.size(), 1); + ICHECK_GE(new_in_layouts.size(), 1); auto new_layout = new_in_layouts[0]; auto shape = old_in_shapes[0]; // NOTE: Discard "const" qualifier here.
auto* params = const_cast(attrs.as()); - CHECK(params != nullptr); + ICHECK(params != nullptr); Array begin, end, strides; if (params->begin && params->end && params->strides) { for (Integer i : params->strides.value()) { - CHECK(i.defined()); + ICHECK(i.defined()); strides.push_back(params->slice_mode == "size" ? 1 : i->value); } for (Integer i : params->begin.value()) { - CHECK(i.defined()); + ICHECK(i.defined()); begin.push_back(i->value); } for (Integer i : params->end.value()) { - CHECK(i.defined()); + ICHECK(i.defined()); end.push_back(i->value); } } @@ -2273,7 +2273,7 @@ Array> StridedSliceInferCorrectLayout(const Attrs& attrs, Array StridedSliceCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const StridedSliceAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Array begin, end, strides; begin = param->begin.value(); end = param->end.value(); @@ -2281,7 +2281,7 @@ Array StridedSliceCompute(const Attrs& attrs, const Arrayshape.size(); - CHECK(begin.size() == src_tensor_dim) + ICHECK(begin.size() == src_tensor_dim) << "for dynamic inputs, len(begin) must equal the input dimension"; Array out_shape; for (size_t i = 0; i < src_tensor_dim; ++i) { @@ -2364,7 +2364,7 @@ Examples:: // strided_set bool StridedSetRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 6); + ICHECK_EQ(types.size(), 6); reporter->Assign(types[5], types[0]); return true; } @@ -2408,23 +2408,23 @@ TVM_REGISTER_NODE_TYPE(SplitAttrs); bool SplitRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; - CHECK_NE(data->shape.size(), 0) << "Input shape cannot be empty"; + ICHECK_NE(data->shape.size(), 0) << "Input shape cannot be empty"; const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto axis = param->axis; if (axis < 0) { axis += data->shape.size(); } - CHECK_LT(axis, data->shape.size()) << "axis should be within the input dimension range."; - CHECK_GE(axis, 0) << "axis should be within the input dimension range."; + ICHECK_LT(axis, data->shape.size()) << "axis should be within the input dimension range."; + ICHECK_GE(axis, 0) << "axis should be within the input dimension range."; if (const IntImmNode* sections = param->indices_or_sections.as()) { if (!data->shape[axis].as()) { - CHECK(reporter->Assert(indexmod(data->shape[axis], sections->value) == - tir::make_zero(DataType::Int(64)))) + ICHECK(reporter->Assert(indexmod(data->shape[axis], sections->value) == + tir::make_zero(DataType::Int(64)))) << "indices_or_sections need to be able to divide input.shape[axis]"; } std::vector fields; @@ -2444,7 +2444,7 @@ bool SplitRel(const Array& types, int num_inputs, const Attrs& attrs, auto begin = IndexExpr(tir::make_zero(DataType::Int(32))); std::vector fields; for (unsigned int i = 0; i < indices.size(); ++i) { - CHECK(reporter->Assert(Downcast(indices[i]) > begin)) + ICHECK(reporter->Assert(Downcast(indices[i]) > begin)) << "indices_or_sections need to be a sorted ascending list"; std::vector oshape(data->shape.begin(), data->shape.end()); oshape[axis] = Downcast(indices[i]) - begin; @@ -2453,7 +2453,7 @@ bool SplitRel(const Array& types, int num_inputs, const Attrs& attrs, fields.push_back(vec_type); } if (!data->shape[axis].as()) { - CHECK(reporter->Assert(begin < 
data->shape[axis])) + ICHECK(reporter->Assert(begin < data->shape[axis])) << "The sum of sections must match the input.shape[axis]"; } std::vector oshape(data->shape.begin(), data->shape.end()); @@ -2472,7 +2472,7 @@ bool SplitRel(const Array& types, int num_inputs, const Attrs& attrs, Array SplitCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); if (const IntImmNode* sections = param->indices_or_sections.as()) { int64_t num_sections = sections->value; @@ -2538,7 +2538,7 @@ TVM_REGISTER_NODE_TYPE(SliceLikeAttrs); */ bool SliceLikeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); if (data == nullptr) { return false; @@ -2550,7 +2550,7 @@ bool SliceLikeRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const Array& dshape = data->shape; const Array& target_shape = target->shape; @@ -2560,22 +2560,22 @@ bool SliceLikeRel(const Array& types, int num_inputs, const Attrs& attrs, for (size_t i = 0; i < dshape.size(); ++i) { if (i < target_shape.size()) { oshape[i] = target_shape[i]; - CHECK(reporter->Assert(oshape[i] <= dshape[i])) + ICHECK(reporter->Assert(oshape[i] <= dshape[i])) << "End index of axis " << i << " exceeds input shape: " << oshape[i] << " vs " << dshape[i]; } } } else { - CHECK(param->axes.size() != 0) << "Axes cannot be empty."; + ICHECK(param->axes.size() != 0) << "Axes cannot be empty."; for (Integer val : param->axes) { int axis = val->value; if (axis < 0) { axis += dshape.size(); } - CHECK(axis < static_cast(target_shape.size())) + ICHECK(axis < static_cast(target_shape.size())) << "Axis " << axis << " exceeds dimension " << target_shape.size() << " of target_shape."; oshape[axis] = target_shape[axis]; - CHECK(reporter->Assert(oshape[axis] <= dshape[axis])) + ICHECK(reporter->Assert(oshape[axis] <= dshape[axis])) << "End index of axis " << axis << " exceeds input shape: " << oshape[axis] << " vs " << dshape[axis]; } @@ -2595,7 +2595,7 @@ Expr MakeSliceLike(Expr data, Expr shape_like, Array axes) { Array SliceLikeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Array src_shape = inputs[0]->shape; Array target_shape = inputs[1]->shape; Array begin_idx, end_idx, strides; @@ -2608,7 +2608,7 @@ Array SliceLikeCompute(const Attrs& attrs, const Array& for (size_t i = 0; i < src_shape.size(); ++i) { if (i < target_shape.size()) { end_idx.Set(i, target_shape[i]); - CHECK_LE(topi::GetConstInt(end_idx[i]), topi::GetConstInt(src_shape[i])) + ICHECK_LE(topi::GetConstInt(end_idx[i]), topi::GetConstInt(src_shape[i])) << "End index of axis " << i << " exceeds input shape: " << topi::GetConstInt(end_idx[i]) << " vs " << topi::GetConstInt(src_shape[i]); @@ -2620,7 +2620,7 @@ Array SliceLikeCompute(const Attrs& attrs, const Array& axis = static_cast(src_shape.size()) + axis; } end_idx.Set(axis, target_shape[axis]); - CHECK_LE(topi::GetConstInt(end_idx[axis]), topi::GetConstInt(src_shape[axis])) + ICHECK_LE(topi::GetConstInt(end_idx[axis]), topi::GetConstInt(src_shape[axis])) << "End index of axis " << axis << " exceeds input shape: " << topi::GetConstInt(end_idx[axis]) << " vs " << topi::GetConstInt(src_shape[axis]); @@ -2650,7 +2650,7 @@ 
TVM_REGISTER_NODE_TYPE(LayoutTransformAttrs); Array LayoutTransformCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{topi::layout_transform(inputs[0], param->src_layout, param->dst_layout)}; } @@ -2658,7 +2658,7 @@ bool LayoutTransformRel(const Array& types, int num_inputs, const Attrs& a const TypeReporter& reporter) { const auto* data = types[0].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "LayoutTransform: expect input data type to be TensorType but get " << types[0]; return false; } @@ -2667,9 +2667,9 @@ bool LayoutTransformRel(const Array& types, int num_inputs, const Attrs& a Layout src_layout(params->src_layout); Layout dst_layout(params->dst_layout); - CHECK(src_layout.defined() && dst_layout.defined()) << "cannot convert from/to undefined layout"; + ICHECK(src_layout.defined() && dst_layout.defined()) << "cannot convert from/to undefined layout"; auto layout_converter = tir::BijectiveLayout(src_layout, dst_layout); - CHECK(layout_converter.defined()) + ICHECK(layout_converter.defined()) << "cannot convert from " << params->src_layout << " to " << params->dst_layout; const auto& out_shape = layout_converter.ForwardShape(data->shape); @@ -2740,39 +2740,39 @@ TVM_REGISTER_NODE_TYPE(GatherAttrs); bool GatherRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, indices, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* indices = types[1].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "Gather: expect input data type to be TensorType but get " << types[0]; return false; } if (indices == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "Gather: expect indices type to be TensorType but get " << types[1]; return false; } - CHECK(indices->dtype.is_int()) << "indices of take must be tensor of integer"; + ICHECK(indices->dtype.is_int()) << "indices of gather must be tensor of integer"; const auto param = attrs.as(); - CHECK(param != nullptr); - CHECK(param->axis.defined()); + ICHECK(param != nullptr); + ICHECK(param->axis.defined()); const auto ndim_data = data->shape.size(); const auto ndim_indices = indices->shape.size(); int axis = param->axis->value; - CHECK_EQ(ndim_data, ndim_indices); - CHECK_GE(axis, 0); - CHECK_LT(axis, ndim_data); + ICHECK_EQ(ndim_data, ndim_indices); + ICHECK_GE(axis, 0); + ICHECK_LT(axis, ndim_data); std::vector oshape; oshape.reserve(ndim_data); for (size_t i = 0; i < ndim_data; ++i) { if (i == (size_t)axis) { const int64_t* indice_shape_i = tir::as_const_int(indices->shape[i]); - CHECK_GE(*indice_shape_i, 1); + ICHECK_GE(*indice_shape_i, 1); } else { - CHECK(reporter->AssertEQ(indices->shape[i], data->shape[i])); + ICHECK(reporter->AssertEQ(indices->shape[i], data->shape[i])); } oshape.emplace_back(indices->shape[i]); } @@ -2820,23 +2820,23 @@ which must just be not null. Output will have same shape as ``indices``.
bool GatherNDRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, indices, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* indices = types[1].as(); if (data == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "GatherND: expect input data type to be TensorType but get " << types[0]; return false; } if (indices == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "GatherND: expect indices type to be TensorType but get " << types[1]; return false; } const size_t ndim = data->shape.size(); const IntImmNode* mdim = indices->shape[0].as(); const size_t kdim = indices->shape.size() - 1; - CHECK(size_t(mdim->value) <= ndim) << "GatherND: indices shape does satisfy."; + ICHECK(size_t(mdim->value) <= ndim) << "GatherND: indices shape does not satisfy."; Array oshape; for (size_t i = 1; i < kdim + 1; ++i) oshape.push_back(indices->shape[i]); @@ -2879,14 +2879,14 @@ TVM_REGISTER_NODE_TYPE(SequenceMaskAttrs); bool SequenceMaskRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [data, valid_length, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* valid_length = types[1].as(); - CHECK(data); - CHECK(valid_length); + ICHECK(data); + ICHECK(valid_length); const auto param = attrs.as(); Array valid_length_shape; - CHECK(param->axis == 0 || param->axis == 1); + ICHECK(param->axis == 0 || param->axis == 1); valid_length_shape.push_back(data->shape[1 - param->axis]); reporter->Assign(types[1], TensorType(valid_length_shape, valid_length->dtype)); reporter->Assign(types[2], types[0]); @@ -2896,7 +2896,7 @@ bool SequenceMaskRel(const Array& types, int num_inputs, const Attrs& attr Array SequenceMaskCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{ topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis)}; } @@ -2976,12 +2976,12 @@ TVM_REGISTER_NODE_TYPE(OneHotAttrs); bool OneHotRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [indices, on_value, off_value, result] - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* indices = types[0].as(); - CHECK(indices); + ICHECK(indices); const auto param = attrs.as(); - CHECK_GT(param->depth, 0); + ICHECK_GT(param->depth, 0); Array oshape; int ndim = indices->shape.size() + 1; @@ -3002,7 +3002,7 @@ bool OneHotRel(const Array& types, int num_inputs, const Attrs& attrs, Array OneHotCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{ topi::one_hot(inputs[0], inputs[1](), inputs[2](), param->depth, param->axis, param->dtype)}; } @@ -3046,23 +3046,23 @@ RELAY_REGISTER_OP("one_hot") /* relay.unravel_index */ bool UnRavelIndexRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* indices = types[0].as(); if (indices == nullptr) { - CHECK(types[0].as()) + ICHECK(types[0].as()) << "unravel_index: expect input type to be TensorType but get " << types[0]; return false; } - CHECK(indices->dtype.is_int()) << "indices of unravel_index must be tensor of integer"; +
ICHECK(indices->dtype.is_int()) << "indices of unravel_index must be tensor of integer"; const auto* shape = types[1].as(); if (shape == nullptr) { - CHECK(types[1].as()) + ICHECK(types[1].as()) << "unravel_index: expect input type to be TensorType but get " << types[1]; return false; } - CHECK(indices->dtype.is_int()) << "shape of unravel_index must be tensor of integer"; + ICHECK(shape->dtype.is_int()) << "shape of unravel_index must be tensor of integer"; Array indices_shape; Array shape_shape; @@ -3108,7 +3108,7 @@ TVM_REGISTER_NODE_TYPE(SparseToDenseAttrs); bool SparseToDenseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 3); + ICHECK_EQ(num_inputs, 3); auto sparse_indices = types[0].as(); auto sparse_values = types[1].as(); auto default_value = types[2].as(); @@ -3117,17 +3117,17 @@ bool SparseToDenseRel(const Array& types, int num_inputs, const Attrs& att return false; } - CHECK(sparse_indices->dtype.is_int()) << "sparse_indices must be tensor of integers"; + ICHECK(sparse_indices->dtype.is_int()) << "sparse_indices must be tensor of integers"; - CHECK_LE(sparse_indices->shape.size(), 3) + ICHECK_LE(sparse_indices->shape.size(), 3) << "sparse_indices must be a tensor of either 0D, 1D or 2D"; - CHECK_LE(sparse_values->shape.size(), 2) << "sparse_values must be a tensor of either 0D, 1D"; + ICHECK_LE(sparse_values->shape.size(), 2) << "sparse_values must be a tensor of either 0D, 1D"; - CHECK_EQ(default_value->shape.size(), 0) << "default_value should be a scalar"; + ICHECK_EQ(default_value->shape.size(), 0) << "default_value should be a scalar"; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Array oshape; for (auto i : param->output_shape) { @@ -3139,9 +3139,9 @@ bool SparseToDenseRel(const Array& types, int num_inputs, const Attrs& att Array SparseToDenseCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { - CHECK_EQ(inputs.size(), 3); + ICHECK_EQ(inputs.size(), 3); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::sparse_to_dense(inputs[0], param->output_shape, inputs[1], inputs[2]())}; } @@ -3186,16 +3186,16 @@ TVM_REGISTER_NODE_TYPE(MatrixSetDiagAttrs); bool MatrixSetDiagRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // `types` contains: [input, diagonal, result] - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* input = types[0].as(); - CHECK(input); + ICHECK(input); const auto* diagonal = types[1].as(); - CHECK(diagonal); + ICHECK(diagonal); const auto param = attrs.as(); - CHECK_GE(param->k2, param->k1); + ICHECK_GE(param->k2, param->k1); int d_ndims = diagonal->shape.size(); int i_ndims = input->shape.size(); @@ -3224,7 +3224,7 @@ bool MatrixSetDiagRel(const Array& types, int num_inputs, const Attrs& att Array MatrixSetDiagCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{topi::matrix_set_diag(inputs[0], inputs[1], param->k1, param->k2, param->super_diag_right_align, param->sub_diag_right_align)}; @@ -3265,7 +3265,7 @@ RELAY_REGISTER_OP("matrix_set_diag") // adv_index bool AdvIndexRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 1); + ICHECK_EQ(num_inputs, 1); auto inputs = types[0].as(); auto data = inputs->fields[0].as(); @@ -3285,7 +3285,7 @@ bool
AdvIndexRel(const Array& types, int num_inputs, const Attrs& attrs, if (index_type == nullptr) { return false; } - CHECK(index_type->dtype.is_int()) << "indices must be tensor of integers"; + ICHECK(index_type->dtype.is_int()) << "indices must be tensor of integers"; int64_t flatten_len = 1; bool has_dyn_shape = false; diff --git a/src/relay/op/tensor/transform.h b/src/relay/op/tensor/transform.h index 0fe4734fe883..4173d57a84de 100644 --- a/src/relay/op/tensor/transform.h +++ b/src/relay/op/tensor/transform.h @@ -44,7 +44,7 @@ template bool ConcatenateRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, result] - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); /* If we receive a tuple we can continue, if we receive * anything but an incomplete type we should signal an * error. @@ -131,9 +131,9 @@ static inline Array> ConcatenateLayout(const Attrs& attrs, ConcatenateAttrs* param = const_cast(attrs.as()); Array> old_in_shapes; - CHECK_EQ(old_in_types.size(), 1); + ICHECK_EQ(old_in_types.size(), 1); for (auto old_in_tuple_t : old_in_types) { - CHECK(old_in_tuple_t.as()); + ICHECK(old_in_tuple_t.as()); for (auto old_in_t : old_in_tuple_t.as()->fields) { old_in_shapes.push_back(old_in_t.as()->shape); } diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc index 59ef47f413fe..e17bdc0e0906 100644 --- a/src/relay/op/tensor/unary.cc +++ b/src/relay/op/tensor/unary.cc @@ -424,9 +424,9 @@ TVM_REGISTER_NODE_TYPE(ShapeOfAttrs); Array ShapeOfCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { - CHECK_EQ(inputs.size(), 1); + ICHECK_EQ(inputs.size(), 1); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return {topi::shape(inputs[0], param->dtype)}; } @@ -456,7 +456,7 @@ TVM_REGISTER_NODE_TYPE(NdarraySizeAttrs); bool NdarraySizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 1); + ICHECK_EQ(num_inputs, 1); auto tt = types[0].as(); if (tt == nullptr) { @@ -464,16 +464,16 @@ bool NdarraySizeRel(const Array& types, int num_inputs, const Attrs& attrs } const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); reporter->Assign(types[1], TensorType({}, param->dtype)); return true; } Array NdarraySizeCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { - CHECK_EQ(inputs.size(), 1); + ICHECK_EQ(inputs.size(), 1); const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); return Array{topi::ndarray_size(inputs[0], param->dtype)}; } diff --git a/src/relay/op/type_relations.cc b/src/relay/op/type_relations.cc index 0647ec9780f3..3dc33c5022e0 100644 --- a/src/relay/op/type_relations.cc +++ b/src/relay/op/type_relations.cc @@ -99,12 +99,12 @@ Type ConcreteBroadcast(const TensorType& t1, const TensorType& t2, DataType outp bool BroadcastRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); // DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1] // << ",Out:" << types[2] << std::endl; if (auto* t0 = types[0].as()) { if (auto* t1 = types[1].as()) { - CHECK_EQ(t0->dtype, t1->dtype); + ICHECK_EQ(t0->dtype, t1->dtype); reporter->Assign( types[2], ConcreteBroadcast(GetRef(t0), GetRef(t1), t0->dtype)); return true; @@ -115,12 +115,12 @@ bool BroadcastRel(const Array& types, int num_inputs, const Attrs& attrs, bool BroadcastCompRel(const Array& 
types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); // DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1] // << ",Out:" << types[2] << std::endl; if (auto* t0 = types[0].as()) { if (auto* t1 = types[1].as()) { - CHECK_EQ(t0->dtype, t1->dtype); + ICHECK_EQ(t0->dtype, t1->dtype); reporter->Assign(types[2], ConcreteBroadcast(GetRef(t0), GetRef(t1), DataType::Bool())); return true; @@ -149,13 +149,13 @@ Array RankShape(const Array& shape) { bool ShapeOfRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(num_inputs, 1); + ICHECK_EQ(num_inputs, 1); auto tt = types[0].as(); if (tt == nullptr) { return false; } const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); auto rank_shape = RankShape(tt->shape); reporter->Assign(types[1], TensorType(rank_shape, param->dtype)); return true; diff --git a/src/relay/op/vision/multibox_op.cc b/src/relay/op/vision/multibox_op.cc index b766facff050..17d0a4718298 100644 --- a/src/relay/op/vision/multibox_op.cc +++ b/src/relay/op/vision/multibox_op.cc @@ -32,12 +32,12 @@ TVM_REGISTER_NODE_TYPE(MultiBoxPriorAttrs); bool MultiboxPriorRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); const MultiBoxPriorAttrs* param = attrs.as(); const auto& dshape = data->shape; - CHECK_EQ(dshape.size(), 4) << "Input data should be 4D: " "[batch, channel, height, width]"; + ICHECK_EQ(dshape.size(), 4) << "Input data should be 4D: " "[batch, channel, height, width]"; IndexExpr in_height = dshape[2]; IndexExpr in_width = dshape[3]; int num_sizes = static_cast(param->sizes.size()); @@ -78,7 +78,7 @@ TVM_REGISTER_NODE_TYPE(MultiBoxTransformLocAttrs); bool MultiBoxTransformLocRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* cls_prob = types[0].as(); const auto* loc_pred = types[1].as(); @@ -92,17 +92,17 @@ bool MultiBoxTransformLocRel(const Array& types, int num_inputs, const Att const auto& loc_shape = loc_pred->shape; const auto& anchor_shape = anchor->shape; - CHECK_EQ(cls_shape.size(), 3U) << "The dimension of class probability should be 3, but received " << cls_shape.size(); - CHECK_EQ(loc_shape.size(), 2U) + ICHECK_EQ(cls_shape.size(), 3U) << "The dimension of class probability should be 3, but received " + << cls_shape.size(); + ICHECK_EQ(loc_shape.size(), 2U) << "The dimension of location prediction should be 2, but received " << loc_shape.size(); - CHECK_EQ(anchor_shape.size(), 3U) + ICHECK_EQ(anchor_shape.size(), 3U) << "The dimension of anchor should be 3, but received " << anchor_shape.size(); - CHECK(reporter->AssertEQ(cls_shape[2], anchor_shape[1])) << "Number of anchors mismatch found"; - CHECK(reporter->AssertEQ(cls_shape[2] * 4, loc_shape[1])) << "# anchors mismatch with # loc."; - CHECK(reporter->Assert(anchor_shape[1] > 0)) << "Number of anchors must > 0."; - CHECK(reporter->AssertEQ(anchor_shape[2], 4)); + ICHECK(reporter->AssertEQ(cls_shape[2], anchor_shape[1])) << "Number of anchors mismatch found"; + ICHECK(reporter->AssertEQ(cls_shape[2] * 4, loc_shape[1])) << "# anchors mismatch with # loc."; + ICHECK(reporter->Assert(anchor_shape[1] > 0)) << "Number of anchors must be > 0."; + ICHECK(reporter->AssertEQ(anchor_shape[2], 4)); std::vector
oshape0({cls_shape[0], anchor_shape[1], 6}); std::vector oshape1({cls_shape[0]}); diff --git a/src/relay/op/vision/nms.cc b/src/relay/op/vision/nms.cc index f9cdaf66e255..76fdf2829ed0 100644 --- a/src/relay/op/vision/nms.cc +++ b/src/relay/op/vision/nms.cc @@ -31,10 +31,10 @@ TVM_REGISTER_NODE_TYPE(GetValidCountsAttrs); bool GetValidCountRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); const auto& dshape = data->shape; - CHECK_EQ(dshape.size(), 3) << "Input data should be 3-D."; + ICHECK_EQ(dshape.size(), 3) << "Input data should be 3-D."; std::vector oshape({data->shape[0]}); std::vector oshape_indices({data->shape[0], data->shape[1]}); @@ -73,14 +73,14 @@ TVM_REGISTER_NODE_TYPE(NonMaximumSuppressionAttrs); bool NMSRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const auto* data = types[0].as(); const auto* valid_count = types[1].as(); const NonMaximumSuppressionAttrs* param = attrs.as(); const auto& dshape = data->shape; const auto& vshape = valid_count->shape; - CHECK_EQ(dshape.size(), 3) << "Input data should be 3-D."; - CHECK_EQ(vshape.size(), 1) << "Input valid count should be 1-D."; + ICHECK_EQ(dshape.size(), 3) << "Input data should be 3-D."; + ICHECK_EQ(vshape.size(), 1) << "Input valid count should be 1-D."; // assign output type if (param->return_indices) { diff --git a/src/relay/op/vision/rcnn_op.cc b/src/relay/op/vision/rcnn_op.cc index 6550815c6422..8be38d020480 100644 --- a/src/relay/op/vision/rcnn_op.cc +++ b/src/relay/op/vision/rcnn_op.cc @@ -35,23 +35,23 @@ TVM_REGISTER_NODE_TYPE(ROIAlignAttrs); bool ROIAlignRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { auto roi_align_attrs = attrs.as(); - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* rois = types[1].as(); - CHECK(data); - CHECK(rois); + ICHECK(data); + ICHECK(rois); const auto& dshape = data->shape; const auto& rshape = rois->shape; - CHECK(roi_align_attrs); - CHECK_EQ(dshape.size(), 4) << "Input data should be 4-D."; - CHECK_EQ(rshape.size(), 2) << "Input rois should be 2-D."; + ICHECK(roi_align_attrs); + ICHECK_EQ(dshape.size(), 4) << "Input data should be 4-D."; + ICHECK_EQ(rshape.size(), 2) << "Input rois should be 2-D."; // assign output type std::vector oshape; if (roi_align_attrs->layout == "NCHW") { oshape = {rshape[0], dshape[1], roi_align_attrs->pooled_size[0], roi_align_attrs->pooled_size[1]}; } else { - CHECK_EQ(roi_align_attrs->layout, "NHWC") << "Unexpected ROI Align layout"; + ICHECK_EQ(roi_align_attrs->layout, "NHWC") << "Unexpected ROI Align layout"; oshape = {rshape[0], roi_align_attrs->pooled_size[0], roi_align_attrs->pooled_size[1], dshape[3]}; } @@ -111,15 +111,15 @@ TVM_REGISTER_NODE_TYPE(ROIPoolAttrs); bool ROIPoolRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { auto roi_pool_attrs = attrs.as(); - CHECK_EQ(types.size(), 3); + ICHECK_EQ(types.size(), 3); const auto* data = types[0].as(); const auto* rois = types[1].as(); const auto& dshape = data->shape; const auto& rshape = rois->shape; - CHECK(roi_pool_attrs); - CHECK_EQ(dshape.size(), 4) << "Input data should be 4-D."; - CHECK_EQ(rshape.size(), 2) << "Input rois should be 2-D."; - CHECK_EQ(roi_pool_attrs->layout, "NCHW") << "ROI Pool only supports NCHW layout"; + 
ICHECK(roi_pool_attrs); + ICHECK_EQ(dshape.size(), 4) << "Input data should be 4-D."; + ICHECK_EQ(rshape.size(), 2) << "Input rois should be 2-D."; + ICHECK_EQ(roi_pool_attrs->layout, "NCHW") << "ROI Pool only supports NCHW layout"; // assign output type std::vector oshape( {rshape[0], dshape[1], roi_pool_attrs->pooled_size[0], roi_pool_attrs->pooled_size[1]}); @@ -160,7 +160,7 @@ TVM_REGISTER_NODE_TYPE(ProposalAttrs); bool ProposalRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { auto proposal_attrs = attrs.as(); - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* cls_prob = types[0].as(); const auto* bbox_pred = types[1].as(); const auto* im_info = types[2].as(); @@ -169,13 +169,13 @@ bool ProposalRel(const Array& types, int num_inputs, const Attrs& attrs, return false; } - CHECK_EQ(cls_prob->shape.size(), 4U) + ICHECK_EQ(cls_prob->shape.size(), 4U) << "The dimension of class probability should be 4, but received " << cls_prob->shape.size(); - CHECK_EQ(bbox_pred->shape.size(), 4U) + ICHECK_EQ(bbox_pred->shape.size(), 4U) << "The dimension of box prediction should be 4, but received " << bbox_pred->shape.size(); - CHECK_EQ(im_info->shape.size(), 2U) + ICHECK_EQ(im_info->shape.size(), 2U) << "The dimension of image info should be 2, but received " << im_info->shape.size(); - CHECK(reporter->AssertEQ(im_info->shape[1], 3)); + ICHECK(reporter->AssertEQ(im_info->shape[1], 3)); auto batch = cls_prob->shape[0]; diff --git a/src/relay/op/vision/yolo.cc b/src/relay/op/vision/yolo.cc index cfd81131be73..70d882061299 100644 --- a/src/relay/op/vision/yolo.cc +++ b/src/relay/op/vision/yolo.cc @@ -44,14 +44,14 @@ TVM_REGISTER_NODE_TYPE(YoloReorgAttrs); */ bool YoloReorgRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); const auto* data = types[0].as(); if (data == nullptr) return false; const YoloReorgAttrs* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); - CHECK(data->shape.size() == 4) << "Yolo reorg supports only 4 dimension."; + ICHECK(data->shape.size() == 4) << "Yolo reorg supports only 4 dimension."; std::vector oshape(data->shape.begin(), data->shape.end()); oshape[1] = oshape[1] * param->stride * param->stride; oshape[2] = indexdiv(oshape[2], param->stride); @@ -80,7 +80,7 @@ Its function is mostly shape transform.")doc" TVM_ADD_FILELINE) .set_attr("FTVMCompute", [](const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* params = attrs.as(); - CHECK(params != nullptr); + ICHECK(params != nullptr); return Array{topi::vision::reorg(inputs[0], params->stride)}; }); diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc index 424ed5f4bc98..8c1c9f3e9c59 100644 --- a/src/relay/op/vm/vm.cc +++ b/src/relay/op/vm/vm.cc @@ -69,12 +69,12 @@ TVM_REGISTER_GLOBAL("relay.op.vm.shape_func") bool ShapeFuncRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4u); + ICHECK_EQ(types.size(), 4u); auto shape_func_attrs = attrs.as(); - CHECK(shape_func_attrs != nullptr) << "Internal compiler error"; + ICHECK(shape_func_attrs != nullptr) << "Internal compiler error"; auto func_type = types[0].as(); - CHECK(func_type != nullptr); + ICHECK(func_type != nullptr); auto tuple = TupleType(func_type->arg_types); auto in_types = FlattenTupleType(tuple); @@ -137,20 +137,20 @@ RELAY_REGISTER_OP("vm.shape_func") // vm.invoke_tvm_op bool 
InvokeTVMOpRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4u); + ICHECK_EQ(types.size(), 4u); auto func_type = types[0].as(); - CHECK(func_type != nullptr) << "input must be operator with known type"; + ICHECK(func_type != nullptr) << "input must be operator with known type"; auto input_type = types[1].as(); auto output_type = types[2].as(); - CHECK(input_type != nullptr) + ICHECK(input_type != nullptr) << "internal invariant violated: invoke_tvm_op inputs must be a tuple"; - CHECK(output_type != nullptr) + ICHECK(output_type != nullptr) << "internal invariant violated: invoke_tvm_op outputs must be a tuple"; Type ex_output; if (func_type->ret_type.as()) { ex_output = TupleType({func_type->ret_type}); } else { - CHECK(func_type->ret_type.as()) << "should be tuple type"; + ICHECK(func_type->ret_type.as()) << "should be tuple type"; ex_output = func_type->ret_type; } auto ex_input = TupleType(func_type->arg_types); @@ -188,11 +188,11 @@ TVM_REGISTER_NODE_TYPE(ReshapeTensorAttrs); bool ReshapeTensorRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3u); + ICHECK_EQ(types.size(), 3u); auto reshape_attrs = attrs.as(); - CHECK(reshape_attrs); + ICHECK(reshape_attrs); auto tt = types[0].as(); - CHECK(tt) << "input must be tensor type"; + ICHECK(tt) << "input must be tensor type"; reporter->Assign(types[2], TensorType(reshape_attrs->newshape, tt->dtype)); return true; } diff --git a/src/relay/qnn/op/concatenate.cc b/src/relay/qnn/op/concatenate.cc index 88d2ecc9b45b..7a716a1ec498 100644 --- a/src/relay/qnn/op/concatenate.cc +++ b/src/relay/qnn/op/concatenate.cc @@ -38,7 +38,7 @@ namespace qnn { bool QnnConcatenateRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 6); + ICHECK_EQ(types.size(), 6); // Check the scale and zero point types const auto* input_scales_tuple = types[1].as(); @@ -48,7 +48,7 @@ bool QnnConcatenateRel(const Array& types, int num_inputs, const Attrs& at << PrettyPrint(types[1])); } for (const auto& input_scale : input_scales_tuple->fields) { - CHECK(IsScalarType(input_scale, DataType::Float(32))); // input_scales[idx] + ICHECK(IsScalarType(input_scale, DataType::Float(32))); // input_scales[idx] } const auto* input_zero_points_tuple = types[2].as(); @@ -58,11 +58,11 @@ bool QnnConcatenateRel(const Array& types, int num_inputs, const Attrs& at << PrettyPrint(types[2])); } for (const auto& input_zero_point : input_zero_points_tuple->fields) { - CHECK(IsScalarType(input_zero_point, DataType::Int(32))); // input_zero_points[idx] + ICHECK(IsScalarType(input_zero_point, DataType::Int(32))); // input_zero_points[idx] } - CHECK(IsScalarType(types[3], DataType::Float(32))); // output_scale - CHECK(IsScalarType(types[4], DataType::Int(32))); // output_zero_point + ICHECK(IsScalarType(types[3], DataType::Float(32))); // output_scale + ICHECK(IsScalarType(types[4], DataType::Int(32))); // output_zero_point // Collect the input tensor and output tensor devoid of scale and zero points to reuse Relay // Concatenate infer type function. @@ -74,9 +74,9 @@ Array> QnnConcatenateLayout(const Attrs& attrs, const Array& old_in_layouts, const Array& old_in_types) { // Collect the layouts and types to reuse Relay Concatenate Infer Correct Layout. 
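// The vm.invoke_tvm_op relation above compares the callee's flattened
// argument/result tuples against the given input/output tuples via
// FlattenTupleType. A minimal standalone sketch of that flattening over a toy
// recursive type; ToyType and the leaf names are illustrative, not TVM's
// definitions (requires C++17):
#include <iostream>
#include <string>
#include <vector>

struct ToyType {
  std::string tensor;           // non-empty: a leaf tensor type
  std::vector<ToyType> fields;  // otherwise: a tuple of nested types
};

// Collect leaf tensor types in order, recursing through nested tuples.
void FlattenTupleType(const ToyType& t, std::vector<std::string>* out) {
  if (t.tensor.empty()) {
    for (const auto& f : t.fields) FlattenTupleType(f, out);
  } else {
    out->push_back(t.tensor);
  }
}

int main() {
  ToyType nested{"", {{"T0", {}}, {"", {{"T1", {}}, {"T2", {}}}}}};
  std::vector<std::string> leaves;
  FlattenTupleType(nested, &leaves);
  for (const auto& s : leaves) std::cout << s << ' ';  // prints: T0 T1 T2
}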
- CHECK_EQ(old_in_types.size(), 5); + ICHECK_EQ(old_in_types.size(), 5); auto input_tuple_type = old_in_types[0].as(); - CHECK(input_tuple_type); + ICHECK(input_tuple_type); auto num_input_tensors = input_tuple_type->fields.size(); Array relay_new_in_layouts(nullptr); @@ -126,19 +126,19 @@ Expr MakeQnnConcatenate(Expr data, Expr input_scales, Expr input_zero_points, Ex Expr ConcatenateQnnCanonicalize(const Attrs& attrs, const Array& new_args, const Array& arg_types) { // Get the attrs. - CHECK_EQ(new_args.size(), 5); + ICHECK_EQ(new_args.size(), 5); auto& data = new_args[0]; auto& input_scales = new_args[1]; auto& input_zero_points = new_args[2]; auto& output_scale = new_args[3]; auto& output_zero_point = new_args[4]; const auto* concatenate_attrs = attrs.as(); - CHECK(concatenate_attrs != nullptr); + ICHECK(concatenate_attrs != nullptr); // Get the input dtype and shape. - CHECK_GE(arg_types.size(), 1); + ICHECK_GE(arg_types.size(), 1); auto tuple_type = arg_types[0].as(); - CHECK(tuple_type != nullptr); + ICHECK(tuple_type != nullptr); // FIXME (anijain2305) - The lowering can be further optimized. Instead of inserting requantize in // the start, we can insert requantize at the end if and only if all the input tensors have same @@ -156,13 +156,13 @@ Expr ConcatenateQnnCanonicalize(const Attrs& attrs, const Array& new_args, tuple_exprs.push_back(TupleGetItem(call, i)); } } - CHECK(!tuple_exprs.empty()); + ICHECK(!tuple_exprs.empty()); auto tuple_input_scales = input_scales.as(); - CHECK(tuple_input_scales != nullptr); + ICHECK(tuple_input_scales != nullptr); auto tuple_input_zero_points = input_zero_points.as(); - CHECK(tuple_input_zero_points != nullptr); + ICHECK(tuple_input_zero_points != nullptr); int idx = 0; Array requantized_exprs; diff --git a/src/relay/qnn/op/convolution.cc b/src/relay/qnn/op/convolution.cc index 73ee4561907d..a9f2f361f2b3 100644 --- a/src/relay/qnn/op/convolution.cc +++ b/src/relay/qnn/op/convolution.cc @@ -42,34 +42,34 @@ namespace qnn { bool QnnConv2DRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 7); + ICHECK_EQ(types.size(), 7); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr || weight == nullptr) return false; const auto* param = attrs.as(); - CHECK(param != nullptr) << "Conv2DAttrs cannot be nullptr."; - CHECK(data->dtype == DataType::Int(8) || data->dtype == DataType::UInt(8)) + ICHECK(param != nullptr) << "Conv2DAttrs cannot be nullptr."; + ICHECK(data->dtype == DataType::Int(8) || data->dtype == DataType::UInt(8)) << "Expected qnn conv2d type(int8, uint8) for input but was " << data->dtype; - CHECK(weight->dtype == DataType::Int(8) || weight->dtype == DataType::UInt(8)) + ICHECK(weight->dtype == DataType::Int(8) || weight->dtype == DataType::UInt(8)) << "Expected qnn conv2d type(int8, uint8) for weight but was " << weight->dtype; - CHECK(param->out_dtype == DataType::Int(16) || param->out_dtype == DataType::Int(32)) + ICHECK(param->out_dtype == DataType::Int(16) || param->out_dtype == DataType::Int(32)) << "Expected qnn conv2d type(int32, int16) for output but was " << param->out_dtype; - CHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0."; + ICHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0."; // Check the types of scale and zero points. 
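// IsScalarType, used for every scale/zero-point argument above and defined in
// src/relay/qnn/utils.h later in this diff, accepts only a rank-0 tensor of
// the expected dtype. A standalone model of the same predicate over a toy
// type record; ToyTensorType is illustrative, and assert() stands in for the
// ICHECKs:
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct ToyTensorType {
  std::vector<int64_t> shape;
  std::string dtype;
};

bool IsScalarTypeModel(const ToyTensorType& t, const std::string& dtype) {
  assert(t.shape.empty() && "scale/zero point must be a scalar");  // ICHECK_EQ(rank, 0)
  assert(t.dtype == dtype && "unexpected dtype");                  // ICHECK(dtype matches)
  return true;
}

int main() {
  ToyTensorType input_scale{{}, "float32"};
  std::cout << IsScalarTypeModel(input_scale, "float32") << "\n";  // prints: 1
}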
- CHECK(IsScalarType(types[2], DataType::Int(32))); // input_zero_point - CHECK(IsScalarType(types[3], DataType::Int(32))); // kernel_zero_point - CHECK(IsScalarType(types[4], DataType::Float(32))); // input_scale + ICHECK(IsScalarType(types[2], DataType::Int(32))); // input_zero_point + ICHECK(IsScalarType(types[3], DataType::Int(32))); // kernel_zero_point + ICHECK(IsScalarType(types[4], DataType::Float(32))); // input_scale // Kernel scale can be a vector of length output_channels or a scalar. if (param->groups == 1) { size_t axis = param->kernel_layout.operator std::string().find('O'); - CHECK(axis != std::string::npos) << "Kernel layout attribute is not defined"; + ICHECK(axis != std::string::npos) << "Kernel layout attribute is not defined"; AssignType(types[5], DataType::Float(32), weight->shape[axis], reporter); // kernel scale } else { // Here, total number of output channels depend on depth multiplier. size_t o_axis = param->kernel_layout.operator std::string().find('O'); size_t i_axis = param->kernel_layout.operator std::string().find('I'); - CHECK(o_axis != std::string::npos || i_axis != std::string::npos) + ICHECK(o_axis != std::string::npos || i_axis != std::string::npos) << "Kernel layout attribute is not defined"; AssignType(types[5], DataType::Float(32), weight->shape[i_axis] * weight->shape[o_axis], reporter); // kernel scale @@ -628,18 +628,18 @@ Expr Conv2DCombineTerms(const Expr& term1, const Expr& term2, const Expr& term3, */ Expr QnnConv2DCanonicalize(const Attrs& attrs, const Array& new_args, const Array& arg_types) { - CHECK_EQ(new_args.size(), 6); + ICHECK_EQ(new_args.size(), 6); Expr data = new_args[0]; Expr weight = new_args[1]; Expr input_zero_point = new_args[2]; Expr kernel_zero_point = new_args[3]; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // Assertion checks for exisiing support. - CHECK(param->data_layout == "NCHW" || param->data_layout == "NHWC") + ICHECK(param->data_layout == "NCHW" || param->data_layout == "NHWC") << "qnn.conv2d supports only NCHW/NHWC input data layout."; - CHECK(param->kernel_layout == "OIHW" || param->kernel_layout == "HWIO" || - param->kernel_layout == "HWOI") + ICHECK(param->kernel_layout == "OIHW" || param->kernel_layout == "HWIO" || + param->kernel_layout == "HWOI") << "qnn.conv2d supports only OIHW/HWIO/HWOI kernel data layout."; int batch_size, in_channels, out_channels, kernel_h, kernel_w, channel_multiplier; @@ -655,14 +655,14 @@ Expr QnnConv2DCanonicalize(const Attrs& attrs, const Array& new_args, // traverse the elements in dilated manner. Currently, we do not have strided pool. So, in case of // dilated conv with non-zero kernel point, we fall back to simpler but slow lowering. 
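// The kernel-scale checks above locate the output-channel dimension by
// searching the kernel_layout string; for depthwise conv the total output
// channel count is the product of the I and O extents. A tiny runnable
// illustration with made-up shapes:
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string kernel_layout = "HWOI";        // e.g. a depthwise conv2d kernel
  std::vector<int64_t> wshape{3, 3, 32, 2};  // H, W, O, I
  size_t o_axis = kernel_layout.find('O');
  size_t i_axis = kernel_layout.find('I');
  assert(o_axis != std::string::npos && i_axis != std::string::npos);  // the ICHECK above
  std::cout << wshape[i_axis] * wshape[o_axis] << "\n";  // 2 * 32 = 64 channels
}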
- CHECK_EQ(param->dilation.size(), 2) << "qnn.conv2d only supports 2D dilation"; + ICHECK_EQ(param->dilation.size(), 2) << "qnn.conv2d only supports 2D dilation"; auto dilation_h = get_const_int(param->dilation[0]); auto dilation_w = get_const_int(param->dilation[1]); if ((kernel_zero_point_int != 0 && (dilation_h != 1 || dilation_w != 1)) || (param->groups != 1 && !is_depthwise(param))) { return Conv2DFallBack(data, weight, input_zero_point, kernel_zero_point, param); } else if (is_depthwise(param)) { - CHECK_NE(channel_multiplier, -1); + ICHECK_NE(channel_multiplier, -1); auto padded_data = Conv2DPadInput(data, input_zero_point, param); auto term1 = Conv2DFirstTerm(padded_data, weight, param); auto term2 = DepthwiseConv2DSecondTerm(padded_data, kernel_zero_point, param, kernel_h, diff --git a/src/relay/qnn/op/dense.cc b/src/relay/qnn/op/dense.cc index e1cbfaf98df1..62988c8cc52f 100644 --- a/src/relay/qnn/op/dense.cc +++ b/src/relay/qnn/op/dense.cc @@ -39,26 +39,26 @@ namespace qnn { bool QnnDenseRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 7); + ICHECK_EQ(types.size(), 7); const auto* data = types[0].as(); const auto* weight = types[1].as(); if (data == nullptr || weight == nullptr) return false; const auto* param = attrs.as(); - CHECK(param != nullptr) << "DenseAttrs cannot be nullptr."; - CHECK(data->dtype == DataType::Int(8) || data->dtype == DataType::UInt(8)) + ICHECK(param != nullptr) << "DenseAttrs cannot be nullptr."; + ICHECK(data->dtype == DataType::Int(8) || data->dtype == DataType::UInt(8)) << "Expected quantized dense type(int8, uint8) for input but was " << data->dtype; - CHECK(weight->dtype == DataType::Int(8) || weight->dtype == DataType::UInt(8)) + ICHECK(weight->dtype == DataType::Int(8) || weight->dtype == DataType::UInt(8)) << "Expected quantized dense type(int8, uint8) for weight but was " << weight->dtype; - CHECK(param->out_dtype == DataType::Int(32)) + ICHECK(param->out_dtype == DataType::Int(32)) << "Expected quantized dense type(int32) for output but was " << param->out_dtype; // Check the types of scale and zero points. - CHECK(IsScalarType(types[2], DataType::Int(32))); // input_zero_point - CHECK(IsScalarType(types[3], DataType::Int(32))); // kernel_zero_point - CHECK(IsScalarType(types[4], DataType::Float(32))); // input_scale + ICHECK(IsScalarType(types[2], DataType::Int(32))); // input_zero_point + ICHECK(IsScalarType(types[3], DataType::Int(32))); // kernel_zero_point + ICHECK(IsScalarType(types[4], DataType::Float(32))); // input_scale AssignType(types[5], DataType::Float(32), param->units, reporter); - CHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0."; + ICHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0."; // Collect the input tensor and output tensor devoid of scale and zero points to reuse Relay // Dense infer type function. 
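// Conv2DFirstTerm / DepthwiseConv2DSecondTerm above come from expanding the
// zero-point-adjusted convolution: on scalars,
// (a - za) * (w - zw) = a*w - a*zw - w*za + za*zw,
// so one plain integer conv plus two cheap reductions and one constant
// replace the subtract-then-convolve form. A dot-product demo of the
// identity (values illustrative):
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int32_t> a{3, 5, 7}, w{2, 4, 6};
  int32_t za = 1, zw = 2;  // input / kernel zero points
  int32_t direct = 0, t1 = 0, t2 = 0, t3 = 0;
  for (size_t i = 0; i < a.size(); ++i) {
    direct += (a[i] - za) * (w[i] - zw);
    t1 += a[i] * w[i];  // term 1: plain integer "conv"
    t2 += a[i] * zw;    // term 2: reduction over data
    t3 += w[i] * za;    // term 3: reduction over weights (constant-foldable)
  }
  int32_t t4 = static_cast<int32_t>(a.size()) * za * zw;  // term 4: a constant
  std::cout << direct << " == " << (t1 - t2 - t3 + t4) << "\n";  // 32 == 32
}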
@@ -133,7 +133,7 @@ Expr DenseFourthTerm(int input_zero_point_int, int kernel_zero_point_int, int re */ Expr QnnDenseCanonicalize(const Attrs& attrs, const Array& new_args, const Array& arg_types) { - CHECK_EQ(new_args.size(), 6); + ICHECK_EQ(new_args.size(), 6); Expr quantized_data = new_args[0]; Expr quantized_kernel = new_args[1]; Expr input_zero_point = new_args[2]; diff --git a/src/relay/qnn/op/dequantize.cc b/src/relay/qnn/op/dequantize.cc index 0a81f3fe4fdb..2e7a28624e26 100644 --- a/src/relay/qnn/op/dequantize.cc +++ b/src/relay/qnn/op/dequantize.cc @@ -38,7 +38,7 @@ TVM_REGISTER_NODE_TYPE(DequantizeAttrs); bool DequantizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) { @@ -46,17 +46,17 @@ bool DequantizeRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto input_dtype = data->dtype; - CHECK(input_dtype == DataType::Int(8) || input_dtype == DataType::UInt(8) || - input_dtype == DataType::Int(32)) + ICHECK(input_dtype == DataType::Int(8) || input_dtype == DataType::UInt(8) || + input_dtype == DataType::Int(32)) << "Input type should be one of the quantized types [unit8, int8, int32] but was " << input_dtype; const auto* dequantize_attrs = attrs.as(); int axis = dequantize_attrs->axis; axis = (axis == -1) ? data->shape.size() - 1 : axis; - CHECK_LT(axis, static_cast(data->shape.size())) + ICHECK_LT(axis, static_cast(data->shape.size())) << "axis " << dequantize_attrs->axis << " is out of range"; - CHECK_GE(axis, 0) << "axis " << dequantize_attrs->axis << " is out of range"; + ICHECK_GE(axis, 0) << "axis " << dequantize_attrs->axis << " is out of range"; // Check and assign types for scale and zero points. AssignType(types[1], DataType::Float(32), data->shape[axis], reporter); // scale @@ -103,22 +103,22 @@ Expr DequantizeLower(const Expr& input_tensor, const Expr& input_scale, Expr DequantizeQnnCanonicalize(const Attrs& attrs, const Array& new_args, const Array& types) { - CHECK_EQ(new_args.size(), 3); + ICHECK_EQ(new_args.size(), 3); auto& data = new_args[0]; auto& input_scale = new_args[1]; auto& input_zero_point = new_args[2]; - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); // Get attrs. const auto* dequantize_attrs = attrs.as(); - CHECK(dequantize_attrs != nullptr); + ICHECK(dequantize_attrs != nullptr); // Find input shape. - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); auto in_type = types[0]; auto in_tensor_type = in_type.as(); - CHECK(in_tensor_type != nullptr) << "Type information missing." - << " Please run infer_type pass."; + ICHECK(in_tensor_type != nullptr) << "Type information missing." 
+ << " Please run infer_type pass."; Array input_shape = in_tensor_type->shape; return DequantizeLower(data, input_scale, input_zero_point, input_shape, dequantize_attrs); diff --git a/src/relay/qnn/op/op_common.h b/src/relay/qnn/op/op_common.h index 3ca8f64ac9d9..330802c4c9b1 100644 --- a/src/relay/qnn/op/op_common.h +++ b/src/relay/qnn/op/op_common.h @@ -68,7 +68,7 @@ struct QnnBinaryOpArguments { Expr output_zero_point; explicit QnnBinaryOpArguments(const Array& new_args) { - CHECK_EQ(new_args.size(), kNumQnnBinaryOpInputs); + ICHECK_EQ(new_args.size(), kNumQnnBinaryOpInputs); int idx = 0; lhs = new_args[idx++]; rhs = new_args[idx++]; @@ -78,7 +78,7 @@ struct QnnBinaryOpArguments { rhs_zero_point = new_args[idx++]; output_scale = new_args[idx++]; output_zero_point = new_args[idx++]; - CHECK_EQ(idx, kNumQnnBinaryOpInputs); + ICHECK_EQ(idx, kNumQnnBinaryOpInputs); } }; @@ -92,9 +92,9 @@ struct QnnBinaryOpTensorType { Array shape; explicit QnnBinaryOpTensorType(const Array& arg_types, const int32_t arg_idx) { - CHECK_EQ(arg_types.size(), kNumQnnBinaryOpArgTypes); + ICHECK_EQ(arg_types.size(), kNumQnnBinaryOpArgTypes); auto tensor_type = arg_types[arg_idx].as(); - CHECK(tensor_type != nullptr); + ICHECK(tensor_type != nullptr); dtype = tensor_type->dtype; shape = tensor_type->shape; } @@ -168,15 +168,15 @@ inline Array > QnnBinaryBroadcastLayout(const Attrs& attrs, static inline bool QnnBroadcastRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), kNumQnnBinaryOpArgTypes); + ICHECK_EQ(types.size(), kNumQnnBinaryOpArgTypes); // Check the scale and zero point types - CHECK(IsScalarType(types[2], DataType::Float(32))); // lhs_scale - CHECK(IsScalarType(types[3], DataType::Int(32))); // lhs_zero_point - CHECK(IsScalarType(types[4], DataType::Float(32))); // rhs_scale - CHECK(IsScalarType(types[5], DataType::Int(32))); // rhs_zero_point - CHECK(IsScalarType(types[6], DataType::Float(32))); // output_scale - CHECK(IsScalarType(types[7], DataType::Int(32))); // output_zero_point + ICHECK(IsScalarType(types[2], DataType::Float(32))); // lhs_scale + ICHECK(IsScalarType(types[3], DataType::Int(32))); // lhs_zero_point + ICHECK(IsScalarType(types[4], DataType::Float(32))); // rhs_scale + ICHECK(IsScalarType(types[5], DataType::Int(32))); // rhs_zero_point + ICHECK(IsScalarType(types[6], DataType::Float(32))); // output_scale + ICHECK(IsScalarType(types[7], DataType::Int(32))); // output_zero_point // Collect the input tensor and output tensor devoid of scale and zero points to reuse Relay // BroadcastRel infer type function. diff --git a/src/relay/qnn/op/quantize.cc b/src/relay/qnn/op/quantize.cc index 07847916fae7..0622c96f04a6 100644 --- a/src/relay/qnn/op/quantize.cc +++ b/src/relay/qnn/op/quantize.cc @@ -38,7 +38,7 @@ TVM_REGISTER_NODE_TYPE(QuantizeAttrs); bool QuantizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); const auto* data = types[0].as(); if (data == nullptr) { @@ -46,15 +46,15 @@ bool QuantizeRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto input_dtype = data->dtype; - CHECK(input_dtype == DataType::Float(32)) + ICHECK(input_dtype == DataType::Float(32)) << "Input type should be one of float32 but was " << input_dtype; const auto* quantize_attrs = attrs.as(); int axis = quantize_attrs->axis; axis = (axis == -1) ? 
data->shape.size() - 1 : axis; - CHECK_LT(axis, static_cast(data->shape.size())) + ICHECK_LT(axis, static_cast(data->shape.size())) << "axis " << quantize_attrs->axis << " is out of range"; - CHECK_GE(axis, 0) << "axis " << quantize_attrs->axis << " is out of range"; + ICHECK_GE(axis, 0) << "axis " << quantize_attrs->axis << " is out of range"; // Check and assign types for scale and zero points. AssignType(types[1], DataType::Float(32), data->shape[axis], reporter); // scale @@ -62,8 +62,8 @@ bool QuantizeRel(const Array& types, int num_inputs, const Attrs& attrs, const Array oshape = data->shape; const DataType out_dtype = quantize_attrs->out_dtype; - CHECK(out_dtype == DataType::Int(8) || out_dtype == DataType::UInt(8) || - out_dtype == DataType::Int(32)) + ICHECK(out_dtype == DataType::Int(8) || out_dtype == DataType::UInt(8) || + out_dtype == DataType::Int(32)) << "Output type should be one of [int8, unit8, int32] but was " << out_dtype; // assign output type reporter->Assign(types[3], TensorType(oshape, out_dtype)); @@ -113,19 +113,19 @@ Expr QuantizeLower(const Expr& input_tensor, const Expr& output_scale, Expr QuantizeQnnCanonicalize(const Attrs& attrs, const Array& new_args, const Array& types) { - CHECK_EQ(new_args.size(), 3); + ICHECK_EQ(new_args.size(), 3); auto& data = new_args[0]; auto& output_scale = new_args[1]; auto& output_zero_point = new_args[2]; const auto* quantize_attrs = attrs.as(); - CHECK(quantize_attrs != nullptr); + ICHECK(quantize_attrs != nullptr); // Find input shape. - CHECK_EQ(types.size(), 4); + ICHECK_EQ(types.size(), 4); auto in_type = types[0]; auto in_tensor_type = in_type.as(); - CHECK(in_tensor_type != nullptr) << "Type information missing." - << " Please run infer_type pass."; + ICHECK(in_tensor_type != nullptr) << "Type information missing." + << " Please run infer_type pass."; Array input_shape = in_tensor_type->shape; return QuantizeLower(data, output_scale, output_zero_point, input_shape, quantize_attrs); diff --git a/src/relay/qnn/op/requantize.cc b/src/relay/qnn/op/requantize.cc index 3572a3980ced..8e9b31e6fc39 100644 --- a/src/relay/qnn/op/requantize.cc +++ b/src/relay/qnn/op/requantize.cc @@ -44,7 +44,7 @@ Array> RequantizeInferCorrectLayout(const Attrs& attrs, Array> old_in_shapes; for (auto old_in_t : old_in_types) { - CHECK(old_in_t.as()); + ICHECK(old_in_t.as()); old_in_shapes.push_back(old_in_t.as()->shape); } @@ -52,8 +52,8 @@ Array> RequantizeInferCorrectLayout(const Attrs& attrs, if (new_in_layouts.defined()) { // Adapt to new layout. The axis has to change. // Record original reduce axis. Convert to the modified layout axis. - CHECK_EQ(new_in_layouts.size(), 5); - CHECK_EQ(old_in_layouts.size(), 5); + ICHECK_EQ(new_in_layouts.size(), 5); + ICHECK_EQ(old_in_layouts.size(), 5); // 1) Get the axis. int axis = param->axis; @@ -90,7 +90,7 @@ Array> RequantizeInferCorrectLayout(const Attrs& attrs, param->axis = new_axis; } else if (old_in_layouts.defined()) { // If the new layout is undefined, set the old layout as the inferred layout. 
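// QuantizeLower above implements the standard affine scheme
// q = clamp(round(x / scale) + zero_point, qmin, qmax), with qmin/qmax taken
// from out_dtype as GetQmin/GetQmax do. A float reference for the int8 case;
// a sketch only, not TVM's lowered kernel:
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int8_t QuantizeRef(float x, float scale, int32_t zero_point) {
  int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
  return static_cast<int8_t>(std::min(127, std::max(-128, q)));  // qmin/qmax of int8
}

int main() {
  std::cout << static_cast<int>(QuantizeRef(0.7f, 0.01f, 10)) << "\n";  // prints: 80
}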
- CHECK_EQ(old_in_layouts.size(), 5); + ICHECK_EQ(old_in_layouts.size(), 5); Layout old_layout = old_in_layouts[0]; @@ -214,32 +214,32 @@ Expr RequantizeLower(const Expr& input_tensor, const Expr& input_scale, */ Expr RequantizeQnnCanonicalize(const Attrs& attrs, const Array& new_args, const Array& types) { - CHECK_EQ(new_args.size(), 5); + ICHECK_EQ(new_args.size(), 5); auto& quantized_data = new_args[0]; auto& input_scale = new_args[1]; auto& input_zero_point = new_args[2]; auto& output_scale = new_args[3]; auto& output_zero_point = new_args[4]; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); // Find input shape. - CHECK_EQ(types.size(), 6); + ICHECK_EQ(types.size(), 6); auto in_type = types[0]; auto in_tensor_type = in_type.as(); - CHECK(in_tensor_type != nullptr) << "Type information missing." - << " Please run infer_type pass."; + ICHECK(in_tensor_type != nullptr) << "Type information missing." + << " Please run infer_type pass."; Array input_shape = in_tensor_type->shape; // Find the output dtype. auto out_type = types[5]; auto out_tensor_type = out_type.as(); - CHECK(out_tensor_type != nullptr) << "Type information missing." - << " Please run infer_type pass."; + ICHECK(out_tensor_type != nullptr) << "Type information missing." + << " Please run infer_type pass."; auto out_dtype = out_tensor_type->dtype; // Check rounding validity. - CHECK(param->rounding == "UPWARD" || param->rounding == "TONEAREST") + ICHECK(param->rounding == "UPWARD" || param->rounding == "TONEAREST") << "QNN requantize supports two rounding modes - UPWARD and " << "TONEAREST"; return RequantizeLower(quantized_data, input_scale, input_zero_point, output_scale, @@ -256,7 +256,7 @@ Expr RequantizeQnnCanonicalize(const Attrs& attrs, const Array& new_args, */ bool RequantizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 6); + ICHECK_EQ(types.size(), 6); const auto* data = types[0].as(); if (data == nullptr) { @@ -264,29 +264,29 @@ bool RequantizeRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto in_dtype = data->dtype; - CHECK(in_dtype == DataType::Int(8) || in_dtype == DataType::UInt(8) || - in_dtype == DataType::Int(32)) + ICHECK(in_dtype == DataType::Int(8) || in_dtype == DataType::UInt(8) || + in_dtype == DataType::Int(32)) << "Input type should be one of [int8, uint8, int32] but was " << in_dtype; const RequantizeAttrs* requantize_attrs = attrs.as(); int axis = requantize_attrs->axis; axis = (axis == -1) ? data->shape.size() - 1 : axis; - CHECK_LT(axis, static_cast(data->shape.size())) + ICHECK_LT(axis, static_cast(data->shape.size())) << "axis " << requantize_attrs->axis << " is out of range"; - CHECK_GE(axis, 0) << "axis " << requantize_attrs->axis << " is out of range"; + ICHECK_GE(axis, 0) << "axis " << requantize_attrs->axis << " is out of range"; // Check and assign types for scale and zero points. AssignType(types[1], DataType::Float(32), data->shape[axis], reporter); // input_scale AssignType(types[2], DataType::Int(32), data->shape[axis], reporter); // input_zero_pt // For now, requantize output tensor is limited to full tensor uniform quantization. 
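// RequantizeLower above avoids floating point at runtime: it folds
// input_scale / output_scale into a Q31 fixed-point multiplier (see
// GetFixedPointMultiplierShift in src/relay/qnn/utils.cc just below) and
// applies it with a rounding shift. A standalone sketch of that scheme,
// assuming per-tensor scales, non-negative accumulators, and TONEAREST
// rounding; names here are illustrative:
#include <cmath>
#include <cstdint>
#include <iostream>
#include <utility>

// Decompose m as significand * 2^(exponent - 31), significand held in int32.
std::pair<int32_t, int> FixedPointMultiplier(double m) {
  int exponent;
  double sig = std::frexp(m, &exponent);  // m = sig * 2^exponent, sig in [0.5, 1)
  int64_t sig_q = std::llround(sig * (1ll << 31));
  if (sig_q == (1ll << 31)) {  // round-up overflow, renormalize
    sig_q /= 2;
    ++exponent;
  }
  return {static_cast<int32_t>(sig_q), exponent};
}

int32_t RequantizeRef(int32_t q_in, int32_t zp_in, double s_in, double s_out,
                      int32_t zp_out) {
  auto fp = FixedPointMultiplier(s_in / s_out);
  int64_t acc = static_cast<int64_t>(q_in - zp_in) * fp.first;  // Q31 product
  int total_shift = 31 - fp.second;               // undo Q31, then apply 2^exponent
  int64_t round_bias = 1ll << (total_shift - 1);  // TONEAREST for acc >= 0
  return static_cast<int32_t>((acc + round_bias) >> total_shift) + zp_out;
}

int main() {
  // 100 steps at scale 0.05 (i.e. 5.0) re-expressed at scale 0.1 -> 50 steps.
  std::cout << RequantizeRef(100, 0, 0.05, 0.1, 0) << "\n";  // prints: 50
}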
-  CHECK(IsScalarType(types[3], DataType::Float(32)));  // output_scale
-  CHECK(IsScalarType(types[4], DataType::Int(32)));    // output_zero_point
+  ICHECK(IsScalarType(types[3], DataType::Float(32)));  // output_scale
+  ICHECK(IsScalarType(types[4], DataType::Int(32)));    // output_zero_point
   const Array<tvm::PrimExpr> oshape = data->shape;
   // assign output type
   auto out_dtype = requantize_attrs->out_dtype;
-  CHECK(out_dtype == DataType::Int(8) || out_dtype == DataType::UInt(8) ||
-        out_dtype == DataType::Int(32))
+  ICHECK(out_dtype == DataType::Int(8) || out_dtype == DataType::UInt(8) ||
+         out_dtype == DataType::Int(32))
       << "Output type should be one of [int8, uint8, int32] but was " << out_dtype;
   reporter->Assign(types[5], TensorType(oshape, out_dtype));
   return true;
diff --git a/src/relay/qnn/utils.cc b/src/relay/qnn/utils.cc
index fc59b61cc6a5..982efa0a61c1 100644
--- a/src/relay/qnn/utils.cc
+++ b/src/relay/qnn/utils.cc
@@ -46,12 +46,12 @@ std::pair<int32_t, int32_t> GetFixedPointMultiplierShift(double double_multiplie
   // multiplying the double value with 2^31 and then casting to int.
   significand_d = std::round(significand_d * (1ll << 31));
   auto significand_int64 = static_cast<int64_t>(significand_d);
-  CHECK_LE(significand_int64, (1ll << 31));
+  ICHECK_LE(significand_int64, (1ll << 31));
   if (significand_int64 == (1ll << 31)) {
     significand_int64 /= 2;
     ++exponent;
   }
-  CHECK_LE(significand_int64, std::numeric_limits<int32_t>::max());
+  ICHECK_LE(significand_int64, std::numeric_limits<int32_t>::max());
   significand = static_cast<int32_t>(significand_int64);
   return std::make_pair(significand, exponent);
 }
diff --git a/src/relay/qnn/utils.h b/src/relay/qnn/utils.h
index f8885c36d162..ab5c9a4fbbe2 100644
--- a/src/relay/qnn/utils.h
+++ b/src/relay/qnn/utils.h
@@ -41,16 +41,16 @@ namespace qnn {

 static inline Array<IndexExpr> get_shape(const Type& type) {
   auto input_tt = type.as<TensorTypeNode>();
-  CHECK(input_tt != nullptr) << "Type information missing."
-                             << " Please run infer_type pass.";
+  ICHECK(input_tt != nullptr) << "Type information missing."
+ << " Please run infer_type pass."; return input_tt->shape; } static inline int32_t GetQmin(const DataType& dtype) { - CHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision"; + ICHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision"; if (dtype.is_int() || dtype.is_uint()) { auto* min_value = tir::as_const_int(tvm::min_value(dtype)); - CHECK(min_value != nullptr); + ICHECK(min_value != nullptr); return static_cast(min_value[0]); } else { LOG(FATAL) << "Type not supported " << dtype; @@ -59,10 +59,10 @@ static inline int32_t GetQmin(const DataType& dtype) { } static inline int32_t GetQmax(const DataType& dtype) { - CHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision"; + ICHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision"; if (dtype.is_int() || dtype.is_uint()) { auto* max_value = tir::as_const_int(tvm::max_value(dtype)); - CHECK(max_value != nullptr); + ICHECK(max_value != nullptr); return static_cast(max_value[0]); } else { LOG(FATAL) << "Type not supported " << dtype; @@ -109,7 +109,7 @@ static inline Expr Requantize(const Expr& data, const Array& input_sh static inline int64_t get_const_int(const tvm::PrimExpr& x) { auto* value_ptr = tir::as_const_int(x); - CHECK(value_ptr) << "Expr is not a constant int"; + ICHECK(value_ptr) << "Expr is not a constant int"; return value_ptr[0]; } @@ -172,10 +172,10 @@ Expr FixedPointMultiplyPerChannel(Expr tensor, std::vector multiplier, */ static inline bool IsScalarType(const Type& expr_type, const DataType& dtype) { const auto* tensor_type = expr_type.as(); - CHECK(tensor_type) << "Only tensor type can be checked for scalar values. But got" - << AsText(expr_type, false); - CHECK_EQ(tensor_type->shape.size(), 0); - CHECK(tensor_type->dtype == dtype) << "Expected " << dtype << " but got " << tensor_type->dtype; + ICHECK(tensor_type) << "Only tensor type can be checked for scalar values. But got" + << AsText(expr_type, false); + ICHECK_EQ(tensor_type->shape.size(), 0); + ICHECK(tensor_type->dtype == dtype) << "Expected " << dtype << " but got " << tensor_type->dtype; return true; } @@ -190,9 +190,10 @@ static inline void AssignType(const Type& expr_type, const DataType& dtype, cons const TypeReporter& reporter) { // Scale/Zero_points can be either const scalar or a vector with C axis num elems. const auto* tensor_type = expr_type.as(); - CHECK(tensor_type) << "Can assign type to Tensor type only. But got " << AsText(expr_type, false); + ICHECK(tensor_type) << "Can assign type to Tensor type only. 
But got " + << AsText(expr_type, false); const auto tensor_dtype = tensor_type->dtype; - CHECK(tensor_dtype == dtype) << "Expected type is " << dtype << " but received " << tensor_dtype; + ICHECK(tensor_dtype == dtype) << "Expected type is " << dtype << " but received " << tensor_dtype; if (tensor_type->shape.size() != 0) { reporter->Assign(expr_type, TensorType({shape}, tensor_type->dtype)); } @@ -201,7 +202,7 @@ static inline void AssignType(const Type& expr_type, const DataType& dtype, cons static inline std::vector GetFloatVectorFromConstant(const Expr& expr) { const auto* n = expr.as(); std::vector vals; - CHECK(n) << "Expr must be a constant expr - " << AsText(expr, false); + ICHECK(n) << "Expr must be a constant expr - " << AsText(expr, false); int64_t num_elems = 1; auto shape = n->data.Shape(); for (size_t i = 0; i < shape.size(); i++) { diff --git a/src/relay/quantize/annotate.cc b/src/relay/quantize/annotate.cc index 8ae7df9e2941..3def616e9423 100644 --- a/src/relay/quantize/annotate.cc +++ b/src/relay/quantize/annotate.cc @@ -83,7 +83,7 @@ Pass QuantizeAnnotate() { std::function fmulti_ref = [](const Expr& e) { if (e->IsInstance()) { const auto* n = e.as(); - CHECK(n); + ICHECK(n); const PackedFunc* f = runtime::Registry::Get("relay.quantize.attach_simulated_quantize"); Expr ret = (*f)(n->expr, static_cast(kQInput)); return static_cast(QAnnotateExpr(ret, kQInput)); diff --git a/src/relay/quantize/calibrate.cc b/src/relay/quantize/calibrate.cc index ea42a198bf84..0ac445295496 100644 --- a/src/relay/quantize/calibrate.cc +++ b/src/relay/quantize/calibrate.cc @@ -71,7 +71,7 @@ static float ComputeEntropy(float* p, float* q, size_t size) { float q_sum = std::accumulate(q, q + size, 0.f); float ret = 0; for (size_t i = 0; i < size; i++) { - CHECK(p[i] > 0 && q[i] > 0); + ICHECK(p[i] > 0 && q[i] > 0); p[i] /= p_sum; q[i] /= q_sum; if (p[i] && q[i]) ret += p[i] * std::log(p[i] / q[i]); @@ -150,7 +150,7 @@ class StatsCollector : private ExprMutator { Expr Collect(const Expr& expr) { auto new_e = this->Mutate(expr); const FunctionNode* func = new_e.as(); - CHECK(func) << "Input shoule be Function"; + ICHECK(func) << "Input shoule be Function"; Expr new_body = Tuple(std::move(profile_data_)); return Function(FreeVars(new_body), new_body, NullValue(), func->type_params, func->attrs); @@ -163,7 +163,7 @@ class StatsCollector : private ExprMutator { Expr VisitExpr_(const CallNode* call) { Expr new_e = ExprMutator::VisitExpr_(call); const CallNode* new_call = new_e.as(); - CHECK(new_call); + ICHECK(new_call); if (new_call->op == simulated_quantize_op_) { auto attrs = new_call->attrs.as(); // rewrite the annotation @@ -178,7 +178,7 @@ class StatsCollector : private ExprMutator { // add non-const expressions to profile data if (attrs->kind != QAnnotateKind::kQWeight) { - CHECK(!quantize_input.as()); + ICHECK(!quantize_input.as()); profile_data_.push_back(identity_quantize); } return identity_quantize; diff --git a/src/relay/quantize/quantize.cc b/src/relay/quantize/quantize.cc index 64a02fff1dca..846367c9c8a9 100644 --- a/src/relay/quantize/quantize.cc +++ b/src/relay/quantize/quantize.cc @@ -39,9 +39,9 @@ TVM_REGISTER_NODE_TYPE(SimulatedQuantizeAttrs); bool SimulatedQuantizeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 5); + ICHECK_EQ(types.size(), 5); const auto param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); const auto* data = types[0].as(); @@ -49,7 +49,7 @@ bool SimulatedQuantizeRel(const 
Array& types, int num_inputs, const Attrs& return false; } - CHECK_NE(data->shape.size(), 0) << "Input shape cannot be empty"; + ICHECK_NE(data->shape.size(), 0) << "Input shape cannot be empty"; reporter->Assign(types[1], TensorType({}, DataType::Float(32))); // dom_scale reporter->Assign(types[2], TensorType({}, DataType::Float(32))); // clip_min diff --git a/src/relay/quantize/realize.cc b/src/relay/quantize/realize.cc index 4b598907e76e..c96a1b063e98 100644 --- a/src/relay/quantize/realize.cc +++ b/src/relay/quantize/realize.cc @@ -107,7 +107,7 @@ inline Expr MulAndDiv(Expr data, float s1, float s2, DataType dtype, float factor = s1 / s2; float shift_factor = std::log2(factor); - CHECK_GT(shift_factor, 0); + ICHECK_GT(shift_factor, 0); if (static_cast(shift_factor) == shift_factor) { return LeftShift(data, MakeConstantScalar(dtype, static_cast(shift_factor))); } else if (static_cast(factor) == factor) { @@ -129,7 +129,7 @@ Expr QuantizeRealize(const Call& ref_call, const Array& new_args, const Ob const QConfig& cfg = QConfig::Current(); // do not handle data type cast const auto param = ref_call->attrs.as(); - CHECK_EQ(param->rounding, "round"); + ICHECK_EQ(param->rounding, "round"); Expr dom_scale = new_args[1]; Expr clip_min = new_args[2]; @@ -153,7 +153,7 @@ Expr QuantizeRealize(const Call& ref_call, const Array& new_args, const Ob } float shift_nbit = std::log2(odom_scale_imm / idom_scale_imm); - CHECK_NE(shift_nbit, 0); + ICHECK_NE(shift_nbit, 0); if (static_cast(shift_nbit) == shift_nbit) { if (shift_nbit > 0) { // use right shift @@ -186,7 +186,7 @@ Expr QuantizeRealize(const Call& ref_call, const Array& new_args, const Ob } // quantize from real - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); Expr data = new_args[0]; Expr scaled_data = Multiply(data, MakeConstantScalar(DataType::Float(32), 1 / dom_scale_imm)); Expr round_data = Clip(Round(scaled_data), clip_min_imm, clip_max_imm); @@ -205,14 +205,14 @@ RELAY_REGISTER_OP("relay.op.annotation.simulated_quantize") Expr Conv2dRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { const QConfig& cfg = QConfig::Current(); - CHECK_EQ(new_args.size(), 2); + ICHECK_EQ(new_args.size(), 2); if (!new_args[0]->IsInstance() && !new_args[1]->IsInstance()) { return Expr(nullptr); } const auto* lhs = new_args[0].as(); - CHECK(lhs); + ICHECK(lhs); const auto* rhs = new_args[1].as(); - CHECK(rhs); + ICHECK(rhs); Expr ldata = lhs->data; if (lhs->dtype != cfg->dtype_input) { @@ -236,7 +236,7 @@ RELAY_REGISTER_OP("nn.conv2d").set_attr("FQRealizeRewrite", Con Expr DenseRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { const QConfig& cfg = QConfig::Current(); - CHECK_EQ(new_args.size(), 2); + ICHECK_EQ(new_args.size(), 2); if (!new_args[0]->IsInstance() || !new_args[1]->IsInstance()) { return Expr(nullptr); } @@ -265,7 +265,7 @@ RELAY_REGISTER_OP("nn.dense").set_attr("FQRealizeRewrite", Dens Expr MulRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { const QConfig& cfg = QConfig::Current(); - CHECK_EQ(new_args.size(), 2); + ICHECK_EQ(new_args.size(), 2); if (new_args[0].as() && new_args[1].as()) { // execute the operation with activation data type. 
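// MulAndDiv and QuantizeRealize above turn a multiplication by s1/s2 into a
// bit shift whenever log2(s1/s2) is an integer; the float-vs-int comparison
// below is exactly the integrality test they use. A runnable check with
// illustrative values:
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  float s1 = 8.0f, s2 = 2.0f;
  float shift_factor = std::log2(s1 / s2);  // 2.0, an exact power of two
  if (static_cast<int>(shift_factor) == shift_factor) {
    int32_t data = 5;
    std::cout << (data << static_cast<int>(shift_factor)) << "\n";  // 5 * 4 = 20
  }
}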
const auto* lhs = new_args[0].as(); @@ -286,7 +286,7 @@ Expr MulRealize(const Call& ref_call, const Array& new_args, const ObjectR Expr dom_scale = FoldConstantOpt(mul); return QRealizeIntExpr(ret, dom_scale, dtype); } - CHECK(!new_args[0]->IsInstance() || !new_args[1]->IsInstance()); + ICHECK(!new_args[0]->IsInstance() || !new_args[1]->IsInstance()); return Expr(nullptr); } @@ -317,13 +317,13 @@ Array UnifyDTypeScale(const Array& ref_args, const Array& args Array ret; for (auto arg : args) { const auto* nptr = arg.as(); - CHECK(nptr); + ICHECK(nptr); nptrs.push_back(nptr); ret.push_back(nptr->data); } // unify the data type - CHECK_EQ(ref_args.size(), args.size()); + ICHECK_EQ(ref_args.size(), args.size()); DataType dtype; if (ret.size() == 2 && nptrs[1]->dtype == cfg->dtype_input) { @@ -357,7 +357,7 @@ Array UnifyDTypeScale(const Array& ref_args, const Array& args } Expr AddRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { - CHECK_EQ(new_args.size(), 2); + ICHECK_EQ(new_args.size(), 2); if (new_args[0].as() && new_args[1].as()) { DataType dtype; Expr dom_scale; @@ -366,14 +366,14 @@ Expr AddRealize(const Call& ref_call, const Array& new_args, const ObjectR return QRealizeIntExpr(ret, dom_scale, dtype); } - CHECK(!new_args[0]->IsInstance() && !new_args[1]->IsInstance()); + ICHECK(!new_args[0]->IsInstance() && !new_args[1]->IsInstance()); return Expr(nullptr); } RELAY_REGISTER_OP("add").set_attr("FQRealizeRewrite", AddRealize); Expr ClipRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { - CHECK_EQ(new_args.size(), 1); + ICHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as()) { const auto ref_attrs = ref_call->attrs.as(); auto attrs = make_object(); @@ -384,20 +384,20 @@ Expr ClipRealize(const Call& ref_call, const Array& new_args, const Object Expr ret = Call(ref_call->op, {n->data}, Attrs(attrs), ref_call->type_args); return QRealizeIntExpr(ret, n->dom_scale, n->dtype); } - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); return Expr(nullptr); } RELAY_REGISTER_OP("clip").set_attr("FQRealizeRewrite", ClipRealize); Expr ConcatenateRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { - CHECK_EQ(new_args.size(), 1); - CHECK_EQ(ref_call->args.size(), 1); + ICHECK_EQ(new_args.size(), 1); + ICHECK_EQ(ref_call->args.size(), 1); const auto* tuple = new_args[0].as(); const auto* ref_tuple = ref_call->args[0].as(); - CHECK(tuple); - CHECK(ref_tuple); + ICHECK(tuple); + ICHECK(ref_tuple); const Array& arr = tuple->fields; const Array& ref_arr = ref_tuple->fields; @@ -409,7 +409,7 @@ Expr ConcatenateRealize(const Call& ref_call, const Array& new_args, const return QRealizeIntExpr(ret, dom_scale, dtype); } else { for (auto arg : new_args) { - CHECK(!arg->IsInstance()); + ICHECK(!arg->IsInstance()); } return Expr(nullptr); } @@ -419,12 +419,12 @@ RELAY_REGISTER_OP("concatenate").set_attr("FQRealizeRewrite", C /* \brief forward the original operator */ Expr IdentityRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { - CHECK_EQ(new_args.size(), 1); + ICHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as()) { Expr ret = ForwardOp(ref_call, {n->data}); return QRealizeIntExpr(ret, n->dom_scale, n->dtype); } - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); return Expr(nullptr); } @@ -442,13 +442,13 @@ RELAY_REGISTER_OP("annotation.stop_fusion") Expr CastDtypeInputRealize(const Call& ref_call, const Array& new_args, const ObjectRef& 
ctx) { const QConfig& cfg = QConfig::Current(); - CHECK_EQ(new_args.size(), 1); + ICHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as()) { Expr data = Cast(n->data, cfg->dtype_input); Expr ret = ForwardOp(ref_call, {data}); return QRealizeIntExpr(ret, n->dom_scale, cfg->dtype_input); } - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); return Expr(nullptr); } @@ -457,7 +457,7 @@ RELAY_REGISTER_OP("nn.max_pool2d") Expr AvgPoolRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { const QConfig& cfg = QConfig::Current(); - CHECK_EQ(new_args.size(), 1); + ICHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as()) { Expr data = n->data; if (n->dtype != cfg->dtype_activation) { @@ -466,7 +466,7 @@ Expr AvgPoolRealize(const Call& ref_call, const Array& new_args, const Obj Expr ret = ForwardOp(ref_call, {data}); return QRealizeIntExpr(ret, n->dom_scale, cfg->dtype_activation); } - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); return Expr(nullptr); } @@ -477,12 +477,12 @@ RELAY_REGISTER_OP("nn.global_avg_pool2d") Expr CastHintRealize(const Call& ref_call, const Array& new_args, const ObjectRef& ctx) { const auto param = ref_call->attrs.as(); - CHECK_EQ(new_args.size(), 1); + ICHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as()) { Expr ret = Cast(n->data, param->dtype); return QRealizeIntExpr(ret, n->dom_scale, param->dtype); } - CHECK(!new_args[0]->IsInstance()); + ICHECK(!new_args[0]->IsInstance()); return Expr(nullptr); } diff --git a/src/relay/transforms/alter_op_layout.cc b/src/relay/transforms/alter_op_layout.cc index 7c5ee019a437..924e61ad0d16 100644 --- a/src/relay/transforms/alter_op_layout.cc +++ b/src/relay/transforms/alter_op_layout.cc @@ -97,7 +97,7 @@ class AlterTransformMemorizer : public TransformMemorizer { } const CallNode* new_call = new_e.as(); - CHECK(new_call) << "Can only replace the original operator with another call node"; + ICHECK(new_call) << "Can only replace the original operator with another call node"; return GetRef(new_call); } diff --git a/src/relay/transforms/annotate_target.cc b/src/relay/transforms/annotate_target.cc index b9d6cce762e5..7a083304515b 100644 --- a/src/relay/transforms/annotate_target.cc +++ b/src/relay/transforms/annotate_target.cc @@ -69,7 +69,7 @@ class AnnotateTargetRewriter : public ExprRewriter { if (call && call->op == CompilerBeginOp()) { // Argument is already compiler begin node meaning that this is not the first time // running this pass, so we simply remove it and will add a new one later. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); const CallNode* end = call->args[0].as(); if (end->op == CompilerEndOp()) { arg_target = end->attrs.as()->compiler; @@ -137,13 +137,13 @@ class AnnotateTargetRewriter : public ExprRewriter { if (op_node && pre->op == CompilerBeginOp()) { // Bypass compiler begin due to lack of target information. It will be processed // when the following op handling arguments. - CHECK_EQ(pre->args.size(), 1U); + ICHECK_EQ(pre->args.size(), 1U); return post.as()->args[0]; } else if (op_node && pre->op == CompilerEndOp()) { // Override compiler end with the new target. 
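// Every hunk in this diff swaps CHECK for ICHECK: the failure semantics are
// unchanged, but the message is branded as an internal invariant violation
// (a compiler bug) rather than bad user input. A minimal stand-in macro
// showing the streaming style these call sites rely on; this is a sketch,
// not TVM's actual logging definition:
#include <cstdlib>
#include <iostream>
#include <sstream>

struct FatalMessage {
  std::ostringstream os;
  FatalMessage(const char* file, int line) { os << file << ":" << line << ": InternalError: "; }
  ~FatalMessage() {  // fires after the trailing << chain has run
    std::cerr << os.str() << std::endl;
    std::abort();
  }
};

#define MY_ICHECK(cond) \
  if (!(cond)) FatalMessage(__FILE__, __LINE__).os << "Check failed: " #cond " "

int main() {
  int num_args = 1;
  MY_ICHECK(num_args == 1) << "compiler_begin takes exactly one argument";  // passes
  std::cout << "ok\n";
}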
- CHECK_EQ(pre->args.size(), 1U); + ICHECK_EQ(pre->args.size(), 1U); auto input_expr = post.as()->args[0]; - CHECK(op_expr_to_target_.find(input_expr) != op_expr_to_target_.end()); + ICHECK(op_expr_to_target_.find(input_expr) != op_expr_to_target_.end()); return InsertAnnotation(input_expr, op_expr_to_target_[input_expr], make_end_op); } // Check prior to peeking first argument @@ -164,7 +164,7 @@ class AnnotateTargetRewriter : public ExprRewriter { // TVM operators: Check target specific op checking function and add to supported_targets // if it is supported. Op op = Downcast(pre->op); - CHECK(op.defined()); + ICHECK(op.defined()); for (const auto& target : this->targets_) { if (!Op::HasAttrMap("target." + std::string(target))) { continue; @@ -178,7 +178,7 @@ class AnnotateTargetRewriter : public ExprRewriter { // Composite function: Add the target of a composite function to supported_targets // if it is in the target list. Function func = Downcast(pre->op); - CHECK(func.defined()); + ICHECK(func.defined()); if (auto comp_name = func->GetAttr(attr::kComposite)) { std::string comp_name_str = comp_name.value(); diff --git a/src/relay/transforms/canonicalize_cast.cc b/src/relay/transforms/canonicalize_cast.cc index 510d098990e3..b0e96cc47514 100644 --- a/src/relay/transforms/canonicalize_cast.cc +++ b/src/relay/transforms/canonicalize_cast.cc @@ -106,13 +106,13 @@ class CastCanonicalizer : public ExprMutator { if (call->op == cast_op_) { auto attrs = call->attrs.as(); const auto* from_type = call->args[0]->type_as(); - CHECK(from_type); + ICHECK(from_type); if (from_type->dtype.bits() < attrs->dtype.bits()) { if (++ref_counter_[call] > 1) { const CallNode* new_call = new_expr.as(); - CHECK(new_call); - CHECK(new_call->op == cast_op_); + ICHECK(new_call); + ICHECK(new_call->op == cast_op_); return Call(new_call->op, new_call->args, new_call->attrs, new_call->type_args); } } diff --git a/src/relay/transforms/canonicalize_ops.cc b/src/relay/transforms/canonicalize_ops.cc index dfb30cae4693..cf14ddcb7c5b 100644 --- a/src/relay/transforms/canonicalize_ops.cc +++ b/src/relay/transforms/canonicalize_ops.cc @@ -41,7 +41,7 @@ class BiasAddSimplifier : public ExprRewriter { auto new_n = post; if (n->op == bias_add_op_) { Call call = Downcast(new_n); - CHECK_EQ(call->args.size(), 2); + ICHECK_EQ(call->args.size(), 2); const BiasAddAttrs* param = call->attrs.as(); auto ttype = n->args[0]->type_as(); diff --git a/src/relay/transforms/combine_parallel_conv2d.cc b/src/relay/transforms/combine_parallel_conv2d.cc index 20fa3e404f6a..20b206e0423c 100644 --- a/src/relay/transforms/combine_parallel_conv2d.cc +++ b/src/relay/transforms/combine_parallel_conv2d.cc @@ -62,8 +62,8 @@ class ParallelConv2DCombiner : public ParallelOpCombiner { const Layout kOIHW("OIHW"); const auto* attrs_a = a->attrs.as(); const auto* attrs_b = b->attrs.as(); - CHECK(attrs_a); - CHECK(attrs_b); + ICHECK(attrs_a); + ICHECK(attrs_b); const auto* tweight_a = a->args[1]->type_as(); const auto* tweight_b = b->args[1]->type_as(); const auto shape_a = @@ -89,7 +89,7 @@ class ParallelConv2DCombiner : public ParallelOpCombiner { const CallNode* group_root = branches[0][0]; const auto* attrs = group_root->attrs.as(); - CHECK(attrs); + ICHECK(attrs); const auto new_attrs = make_object(); new_attrs->strides = attrs->strides; new_attrs->padding = attrs->padding; @@ -105,7 +105,7 @@ class ParallelConv2DCombiner : public ParallelOpCombiner { const std::string& layout = new_attrs->out_layout == "" ? 
new_attrs->data_layout : new_attrs->out_layout; channel_pos_ = layout.find('C'); - CHECK_NE(channel_pos_, std::string::npos); + ICHECK_NE(channel_pos_, std::string::npos); return Call(conv2d, {data, new_weight}, Attrs{new_attrs}, {}); } @@ -198,7 +198,7 @@ class ParallelConv2DCombiner : public ParallelOpCombiner { } auto index = branches[0][0]->attrs.as()->kernel_layout.operator std::string().find('O'); - CHECK_NE(index, std::string::npos); + ICHECK_NE(index, std::string::npos); return std::make_tuple(MakeConcatenate(Tuple(weights), index), tir::make_const(DataType::Int(32), num_filters)); } diff --git a/src/relay/transforms/combine_parallel_dense.cc b/src/relay/transforms/combine_parallel_dense.cc index 74a6921c9409..6d4c8c000f31 100644 --- a/src/relay/transforms/combine_parallel_dense.cc +++ b/src/relay/transforms/combine_parallel_dense.cc @@ -61,8 +61,8 @@ class ParallelDenseToBatchCombiner : public ParallelOpBatchCombiner { StructuralEqual eq; const auto* attrs_a = a->attrs.as(); const auto* attrs_b = b->attrs.as(); - CHECK(attrs_a); - CHECK(attrs_b); + ICHECK(attrs_a); + ICHECK(attrs_b); const auto* weight_a = a->args[1]->type_as(); const auto* weight_b = b->args[1]->type_as(); @@ -89,7 +89,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { const auto* attrs_b = b->attrs.as(); const auto* weight_a = a->args[1]->type_as(); const auto* weight_b = b->args[1]->type_as(); - CHECK(attrs_a != nullptr && attrs_b != nullptr && weight_a != nullptr && weight_b != nullptr); + ICHECK(attrs_a != nullptr && attrs_b != nullptr && weight_a != nullptr && weight_b != nullptr); // output dims (weight->shape[0]) can be different return eq(attrs_a->out_dtype, attrs_b->out_dtype) && eq(weight_a->shape[1], weight_b->shape[1]); } @@ -102,7 +102,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { // concat all weights into one std::tie(new_weight, new_output_dims) = TransformWeight(branches); const auto* origin_attrs = branches[0][0]->attrs.as(); - CHECK(origin_attrs); + ICHECK(origin_attrs); const auto dense_attrs = make_object(); dense_attrs->units = new_output_dims; dense_attrs->out_dtype = origin_attrs->out_dtype; @@ -115,7 +115,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { auto tb = b->args[index]->type_as(); auto toutput_a = a->type_as(); auto toutput_b = b->type_as(); - CHECK(ta != nullptr && tb != nullptr && toutput_a != nullptr && toutput_b != nullptr); + ICHECK(ta != nullptr && tb != nullptr && toutput_a != nullptr && toutput_b != nullptr); if (!eq(ta->dtype, tb->dtype) || ta->shape.size() != tb->shape.size()) { return false; @@ -148,7 +148,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { auto parent = branch[depth]->args[parent_index]; auto& parent_shape = parent->type_as()->shape; auto out_dim = tir::as_const_int(parent_shape[parent_shape.size() - 1]); - CHECK(out_dim != nullptr); + ICHECK(out_dim != nullptr); auto arg = branch[depth]->args[i]; auto& arg_shape = arg->type_as()->shape; @@ -158,7 +158,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { arg = MakeExpandDims(arg, -1, 1); } else { auto arg_last_dim = tir::as_const_int(arg_shape[arg_shape.size() - 1]); - CHECK(arg_last_dim != nullptr); + ICHECK(arg_last_dim != nullptr); if (*out_dim > 1 && *arg_last_dim == 1) { repeat_last_dim = true; } @@ -182,7 +182,7 @@ class ParallelDenseToDenseCombiner : public ParallelOpCombiner { const CallNode* call = branch[depth]; auto& out_shape = call->type_as()->shape; auto out_dims = 
tir::as_const_int(out_shape[out_shape.size() - 1]); - CHECK(out_dims != nullptr); + ICHECK(out_dims != nullptr); Array begin; Array end; Array strides; diff --git a/src/relay/transforms/combine_parallel_op.cc b/src/relay/transforms/combine_parallel_op.cc index b23d01ff469b..1c9a58f49824 100644 --- a/src/relay/transforms/combine_parallel_op.cc +++ b/src/relay/transforms/combine_parallel_op.cc @@ -64,7 +64,7 @@ std::vector BranchGroupFinder::Find(const Expr& expr) { auto&& branch = CreateBranch(child); // add the branch to a group, or create a new group auto it = std::find_if(groups.begin() + ngroups, groups.end(), [&](const Group& group) { - CHECK(!group.empty() && !group[0].empty()); + ICHECK(!group.empty() && !group[0].empty()); return fare_compatible_ops_(child, group[0][0]); }); if (it != groups.end()) { @@ -141,7 +141,7 @@ void ParallelOpCombiner::CombineBranches(const Group& branches) { for (parent_index = 0; parent_index < branches[0][i]->args.size(); parent_index++) { if (branches[0][i]->args[parent_index].get() == branches[0][i - 1]) break; } - CHECK_NE(parent_index, branches[0][i]->args.size()); + ICHECK_NE(parent_index, branches[0][i]->args.size()); if (!CheckLevel(branches, i, parent_index)) break; combined = MakeCombinedCallFromFollowingOps(combined, branches, i, parent_index); } diff --git a/src/relay/transforms/convert_layout.cc b/src/relay/transforms/convert_layout.cc index 577fb068aab9..ba443f602c19 100644 --- a/src/relay/transforms/convert_layout.cc +++ b/src/relay/transforms/convert_layout.cc @@ -112,7 +112,7 @@ class ConvertTransformMemorizer : public TransformMemorizer { } const CallNode* new_call = new_e.as(); - CHECK(new_call) << "Can only replace the original operator with another call node"; + ICHECK(new_call) << "Can only replace the original operator with another call node"; return GetRef(new_call); } diff --git a/src/relay/transforms/convert_sparse_dense.cc b/src/relay/transforms/convert_sparse_dense.cc index 36aaa478eab6..5f4dbe642c3d 100644 --- a/src/relay/transforms/convert_sparse_dense.cc +++ b/src/relay/transforms/convert_sparse_dense.cc @@ -75,9 +75,9 @@ class DenseToSparseDenseMutator : public ExprRewriter { DenseToSparseDenseMutator(const Array& weight_name, const Array >& weight_shape) : dense_op_(Op::Get("nn.dense")), sparse_dense_op_(Op::Get("nn.sparse_dense")) { - CHECK_EQ(weight_name.size(), weight_shape.size()); + ICHECK_EQ(weight_name.size(), weight_shape.size()); for (size_t i = 0; i < weight_name.size(); ++i) { - CHECK(weight_name[i]->IsInstance()); + ICHECK(weight_name[i]->IsInstance()); std::string k = weight_name[i].as()->data; const auto& ws = weight_shape[i]; std::vector v(ws.size()); diff --git a/src/relay/transforms/de_duplicate.cc b/src/relay/transforms/de_duplicate.cc index 8c62fe6100c3..43b71f6f10cc 100644 --- a/src/relay/transforms/de_duplicate.cc +++ b/src/relay/transforms/de_duplicate.cc @@ -40,8 +40,8 @@ Expr DeDup(const Expr& e) { } Var Fresh(const Var& v) { - CHECK_EQ(rename_.count(v), 0); - CHECK_EQ(memo_.count(v), 0) << v.as(); + ICHECK_EQ(rename_.count(v), 0); + ICHECK_EQ(memo_.count(v), 0) << v.as(); Var ret = Var(v->name_hint(), VisitType(v->type_annotation)); rename_[v] = ret; return ret; @@ -94,10 +94,10 @@ Expr DeDup(const Expr& e) { std::unordered_map rename_; std::unordered_map type_rename_; }; - CHECK(WellFormed(e)) << AsText(e, false); + ICHECK(WellFormed(e)) << AsText(e, false); Expr ret = DeDupMutator().VisitExpr(e); - CHECK(WellFormed(ret)); - CHECK_EQ(FreeVars(e).size(), FreeVars(ret).size()); + 
ICHECK(WellFormed(ret)); + ICHECK_EQ(FreeVars(e).size(), FreeVars(ret).size()); return ret; } diff --git a/src/relay/transforms/dead_code.cc b/src/relay/transforms/dead_code.cc index f6c2272a3018..2e7c08a684dc 100644 --- a/src/relay/transforms/dead_code.cc +++ b/src/relay/transforms/dead_code.cc @@ -46,7 +46,7 @@ class FindDef : private ExprVisitor { VarMap expr_map_; void VisitExpr_(const LetNode* l) final { - CHECK_EQ(expr_map_.count(l->var), 0); + ICHECK_EQ(expr_map_.count(l->var), 0); expr_map_[l->var] = l->value; VisitExpr(l->value); VisitExpr(l->body); diff --git a/src/relay/transforms/defunctionalization.cc b/src/relay/transforms/defunctionalization.cc index 135d7fcee548..14a86bc8d080 100644 --- a/src/relay/transforms/defunctionalization.cc +++ b/src/relay/transforms/defunctionalization.cc @@ -103,12 +103,12 @@ class DefuncMutator : public ExprMutator { Expr VisitExpr_(const CallNode* call) { if (auto op = call->op.as()) { - CHECK_EQ(call->type_args.size(), op->checked_type().as()->type_params.size()) + ICHECK_EQ(call->type_args.size(), op->checked_type().as()->type_params.size()) << "all type args must be explicit"; auto op_type = InstFuncType(op->checked_type().as(), call->type_args); - CHECK_EQ(FreeTypeVars(op_type, mod).size(), 0) << "free type vars in instantiated"; - CHECK(!HasFuncType(op_type->ret_type)) << "returning functions not supported"; + ICHECK_EQ(FreeTypeVars(op_type, mod).size(), 0) << "free type vars in instantiated"; + ICHECK(!HasFuncType(op_type->ret_type)) << "returning functions not supported"; if (!IsHigherOrderFunc(op_type)) { // not higher order function @@ -152,7 +152,7 @@ class DefuncMutator : public ExprMutator { // var node will be encoded as datatype // so we need to use the `apply` helper method auto var_original_type = GetUnencodedType(op->type_annotation).as(); - CHECK(var_original_type) << "var original type not saved in var_save_type map"; + ICHECK(var_original_type) << "var original type not saved in var_save_type map"; auto op_type = InstFuncType(var_original_type, call->type_args); Array args = {GetRef(op)}; @@ -209,7 +209,7 @@ class DefuncMutator : public ExprMutator { */ void AddApplyCase(GlobalVar apply_gv, FuncType ft, Constructor c, const Expr& expr, const Array patterns) { - CHECK(c->inputs.size() == patterns.size()) + ICHECK(c->inputs.size() == patterns.size()) << "constructor function and pattern vars have different sizes"; if (!mod->ContainGlobalVar(apply_gv->name_hint)) { auto x = Var("x", TypeCall(c->belong_to, {})); @@ -229,7 +229,7 @@ class DefuncMutator : public ExprMutator { } else { auto f = Downcast(mod->Lookup(apply_gv)); auto body = f->body.as(); - CHECK(body) << "internal invariant broken; apply function body should be a match node"; + ICHECK(body) << "internal invariant broken; apply function body should be a match node"; auto clauses = body->clauses; auto x = f->params[0]; @@ -245,8 +245,8 @@ class DefuncMutator : public ExprMutator { Expr EncodeArg(const Expr& arg, const Type& type) { // we assume arg is either an identifier (var or globalvar) or a function - CHECK(type.as()) << "assume no nested functions"; - CHECK(arg.as() || arg.as() || arg.as()) + ICHECK(type.as()) << "assume no nested functions"; + ICHECK(arg.as() || arg.as() || arg.as()) << "assume all first-order-parameters are identifiers or functions"; if (arg.as()) { @@ -334,11 +334,11 @@ class DefuncMutator : public ExprMutator { */ FuncType GetUnencodedType(const Type& t) { auto tc = t.as(); - CHECK(tc) << "expected type call when getting original type from 
encoded type"; + ICHECK(tc) << "expected type call when getting original type from encoded type"; auto gv = tc->func.as(); - CHECK(gv) << "expected global type var in encoded type"; + ICHECK(gv) << "expected global type var in encoded type"; auto type = original_func_type_map[GetRef(gv)]; - CHECK(type.defined()) << "reverse mapping from encoded type to original type not found"; + ICHECK(type.defined()) << "reverse mapping from encoded type to original type not found"; return Downcast(type); } @@ -357,8 +357,8 @@ class DefuncMutator : public ExprMutator { * \brief specialize a function type */ FuncType InstFuncType(const FuncTypeNode* fty, const Array type_args) { - CHECK(fty) << "InstFuncType functype is null"; - CHECK_EQ(fty->type_params.size(), type_args.size()) + ICHECK(fty) << "InstFuncType functype is null"; + ICHECK_EQ(fty->type_params.size(), type_args.size()) << "size mismatch between function type params and type args"; auto map = tvm::Map(); for (size_t i = 0; i < type_args.size(); i++) { @@ -372,7 +372,7 @@ class DefuncMutator : public ExprMutator { * \brief specialize a function expression */ Function Specialize(const Function& f, const Array type_args) { - CHECK_EQ(f->type_params.size(), type_args.size()) + ICHECK_EQ(f->type_params.size(), type_args.size()) << "cannot specialize function with size mismatch between function type params and type " "args"; auto map = tvm::Map(); @@ -389,7 +389,7 @@ class DefuncMutator : public ExprMutator { * using the `apply` function for applications */ Function FirstifyVars(const Function& f) { - CHECK(f->type_params.size() == 0) << "firstify function has type params"; + ICHECK(f->type_params.size() == 0) << "firstify function has type params"; tvm::Map var_bind_map; Array params; @@ -403,7 +403,7 @@ class DefuncMutator : public ExprMutator { var_bind_map.Set(var, new_var); params.push_back(new_var); } else { - CHECK(!HasFuncType(var->type_annotation)) + ICHECK(!HasFuncType(var->type_annotation)) << "nested function type in parameter not supported yet"; params.push_back(var); } @@ -416,11 +416,11 @@ class DefuncMutator : public ExprMutator { Expr Defunctionalization(const Function& f, const IRModule& mod) { // f is the starting point of the program, all types MUST be known - CHECK(f->type_params.size() == 0) << "no polymorphism supported for defunctionalization"; + ICHECK(f->type_params.size() == 0) << "no polymorphism supported for defunctionalization"; for (const auto& p : f->params) { - CHECK(!HasFuncType(p->checked_type())) << "program cannot have func type parameters"; + ICHECK(!HasFuncType(p->checked_type())) << "program cannot have func type parameters"; } - CHECK(!HasFuncType(f->ret_type)) << "return type cannot contain function"; + ICHECK(!HasFuncType(f->ret_type)) << "return type cannot contain function"; return Downcast(DefuncMutator(mod).VisitExpr(f)); } diff --git a/src/relay/transforms/device_annotation.cc b/src/relay/transforms/device_annotation.cc index b3f22e00fda4..e744fb51e0a6 100644 --- a/src/relay/transforms/device_annotation.cc +++ b/src/relay/transforms/device_annotation.cc @@ -72,16 +72,16 @@ class ValidateAnnotation : private ExprVisitor { if (IsOnDeviceNode(call_node)) { int device_type = GetDeviceId(call_node); if (annotation_map_.count(call_node)) { - CHECK_EQ(annotation_map_.at(call_node), device_type) + ICHECK_EQ(annotation_map_.at(call_node), device_type) << "An expression node can only be annotated to one device."; } else { annotation_map_.insert({call_node, GetDeviceId(call_node)}); } - 
CHECK_EQ(call_node->args.size(), 1U); + ICHECK_EQ(call_node->args.size(), 1U); const auto* node = call_node->args[0].operator->(); if (annotation_map_.count(node)) { - CHECK_EQ(annotation_map_.at(node), device_type) + ICHECK_EQ(annotation_map_.at(node), device_type) << "An expression node can only be annotated to one device."; } else { annotation_map_.insert({node, GetDeviceId(call_node)}); @@ -103,7 +103,7 @@ class ValidateAnnotation : private ExprVisitor { * \return The device type. */ int GetDeviceId(const CallNode* call_node) { - CHECK(IsOnDeviceNode(call_node)) << "The input call node must be on_device node."; + ICHECK(IsOnDeviceNode(call_node)) << "The input call node must be on_device node."; const OnDeviceAttrs* on_device_attr = call_node->attrs.as(); return on_device_attr->device_type; } @@ -226,7 +226,7 @@ class RewriteAnnotation : public ExprMutator { const auto sit = annotation_map_.find(src_node); if (sit == annotation_map_.end()) { const auto dit = annotation_map_.find(dst); - CHECK(dit != annotation_map_.end()) + ICHECK(dit != annotation_map_.end()) << "Device copy op is not required when both src and dst ops are not " "annotated."; return CreateDeviceCopy(src, fallback_device_, dit->second); @@ -391,7 +391,7 @@ class DeviceInfo { // Skip annotation nodes. if (!IsOnDeviceNode(call)) { if (const auto* node = GetDeviceCopyNode(call)) { - CHECK(node->IsInstance()); + ICHECK(node->IsInstance()); const auto* call_node = static_cast(node); auto attrs = call_node->attrs.as(); @@ -496,7 +496,7 @@ Expr RewriteAnnotatedOps(const Expr& expr, int fallback_device) { new_body.push_back(field); } } - CHECK_GT(new_body.size(), 0U); + ICHECK_GT(new_body.size(), 0U); if (new_body.size() == 1) { return Function(params, new_body[0], Type(nullptr), fn->type_params, fn->attrs); } else if (tuple->fields.size() == new_body.size()) { @@ -515,7 +515,7 @@ Expr RewriteAnnotatedOps(const Expr& expr, int fallback_device) { new_fields.push_back(field); } } - CHECK_GT(new_fields.size(), 0U); + ICHECK_GT(new_fields.size(), 0U); if (tuple->fields.size() == new_fields.size()) { return new_fields.size() == 1 ? 
new_fields[0] : new_expr; } else { diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index 5caaea8c9ead..d16d6328301a 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -39,7 +39,7 @@ class DynamicToStaticMutator : public MixedModeMutator { {Op::Get("dyn.reshape"), [](const CallNode* call_node) { if (const ConstantNode* shape = call_node->args[1].as()) { - CHECK_EQ(shape->data->ndim, 1); + ICHECK_EQ(shape->data->ndim, 1); return MakeReshape(call_node->args[0], ToVector(shape->data)); } return Expr(nullptr); @@ -47,7 +47,7 @@ class DynamicToStaticMutator : public MixedModeMutator { {Op::Get("dyn.tile"), [](const CallNode* call_node) { if (const ConstantNode* reps = call_node->args[1].as()) { - CHECK_EQ(reps->data->ndim, 1); + ICHECK_EQ(reps->data->ndim, 1); return MakeTile(call_node->args[0], ToVector(reps->data)); } return Expr(nullptr); @@ -56,7 +56,7 @@ class DynamicToStaticMutator : public MixedModeMutator { [](const CallNode* call_node) { if (const ConstantNode* k = call_node->args[1].as()) { const TopKAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeTopK(call_node->args[0], static_cast(ToScalar(k->data, 0)), param->axis, param->ret_type, param->is_ascend, param->dtype); } @@ -65,7 +65,7 @@ class DynamicToStaticMutator : public MixedModeMutator { {Op::Get("dyn.broadcast_to"), [](const CallNode* call_node) { if (const ConstantNode* shape = call_node->args[1].as()) { - CHECK_EQ(shape->data->ndim, 1); + ICHECK_EQ(shape->data->ndim, 1); return MakeBroadCastTo(call_node->args[0], ToVector(shape->data)); } return Expr(nullptr); @@ -74,7 +74,7 @@ class DynamicToStaticMutator : public MixedModeMutator { [](const CallNode* call_node) { if (const ConstantNode* shape = call_node->args[0].as()) { const InitOpAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeZeros(ToVector(shape->data), param->dtype); } return Expr(nullptr); @@ -83,7 +83,7 @@ class DynamicToStaticMutator : public MixedModeMutator { [](const CallNode* call_node) { if (const ConstantNode* shape = call_node->args[0].as()) { const InitOpAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeOnes(ToVector(shape->data), param->dtype); } return Expr(nullptr); @@ -92,7 +92,7 @@ class DynamicToStaticMutator : public MixedModeMutator { [](const CallNode* call_node) { if (const ConstantNode* depth = call_node->args[3].as()) { const OneHotAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeOneHot(call_node->args[0], call_node->args[1], call_node->args[2], static_cast(ToScalar(depth->data, 0)), param->axis, param->dtype); @@ -103,7 +103,7 @@ class DynamicToStaticMutator : public MixedModeMutator { [](const CallNode* call_node) { if (const ConstantNode* size = call_node->args[1].as()) { const ResizeAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); auto size_int = ToVector(size->data); Array size_prim; for (size_t i = 0; i < size_int.size(); ++i) { @@ -117,9 +117,9 @@ class DynamicToStaticMutator : public MixedModeMutator { {Op::Get("dyn.full"), [](const CallNode* call_node) { if (const ConstantNode* shape = call_node->args[1].as()) { - CHECK_EQ(shape->data->ndim, 1); + ICHECK_EQ(shape->data->ndim, 1); const InitOpAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeFull(call_node->args[0], ToVector(shape->data), param->dtype); } return Expr(nullptr); @@ -129,10 +129,10 @@ 
class DynamicToStaticMutator : public MixedModeMutator { const ConstantNode* scale_h = call_node->args[1].as(); const ConstantNode* scale_w = call_node->args[2].as(); if (scale_h && scale_w) { - CHECK_EQ(scale_h->data->ndim, 0); - CHECK_EQ(scale_w->data->ndim, 0); + ICHECK_EQ(scale_h->data->ndim, 0); + ICHECK_EQ(scale_w->data->ndim, 0); const UpSamplingAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeUpSampling(call_node->args[0], ToScalar(scale_h->data), ToScalar(scale_w->data), param->layout, param->method, param->align_corners); @@ -145,11 +145,11 @@ class DynamicToStaticMutator : public MixedModeMutator { const ConstantNode* scale_h = call_node->args[2].as(); const ConstantNode* scale_w = call_node->args[3].as(); if (scale_d && scale_h && scale_w) { - CHECK_EQ(scale_d->data->ndim, 0); - CHECK_EQ(scale_h->data->ndim, 0); - CHECK_EQ(scale_w->data->ndim, 0); + ICHECK_EQ(scale_d->data->ndim, 0); + ICHECK_EQ(scale_h->data->ndim, 0); + ICHECK_EQ(scale_w->data->ndim, 0); const UpSampling3DAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeUpSampling3D(call_node->args[0], ToScalar(scale_d->data), ToScalar(scale_h->data), ToScalar(scale_w->data), @@ -163,11 +163,11 @@ class DynamicToStaticMutator : public MixedModeMutator { const ConstantNode* pad_width = call_node->args[1].as(); const ConstantNode* pad_fill = call_node->args[2].as(); if (pad_width && pad_fill) { - CHECK_EQ(pad_fill->data->ndim, 0); // pad_val is 1d - CHECK_EQ(pad_width->data->ndim, 2); // pad_width is 2d + ICHECK_EQ(pad_fill->data->ndim, 0); // pad_val is 1d + ICHECK_EQ(pad_width->data->ndim, 2); // pad_width is 2d const PadAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakePad(call_node->args[0], ToMatrix(pad_width->data), ToScalar(pad_fill->data), param->pad_mode); } @@ -179,11 +179,11 @@ class DynamicToStaticMutator : public MixedModeMutator { const ConstantNode* end = call_node->args[2].as(); const ConstantNode* stride = call_node->args[3].as(); if (begin && end && stride) { - CHECK_EQ(begin->data->ndim, 1); - CHECK_EQ(end->data->ndim, 1); - CHECK_EQ(stride->data->ndim, 1); + ICHECK_EQ(begin->data->ndim, 1); + ICHECK_EQ(end->data->ndim, 1); + ICHECK_EQ(stride->data->ndim, 1); const StridedSliceAttrs* param = call_node->attrs.as(); - CHECK(param); + ICHECK(param); return MakeStridedSlice(call_node->args[0], ToVector(begin->data), ToVector(end->data), ToVector(stride->data), param->slice_mode); } diff --git a/src/relay/transforms/eliminate_common_subexpr.cc b/src/relay/transforms/eliminate_common_subexpr.cc index 720a97e9d19d..e9603575111d 100644 --- a/src/relay/transforms/eliminate_common_subexpr.cc +++ b/src/relay/transforms/eliminate_common_subexpr.cc @@ -45,7 +45,7 @@ class CommonSubexprEliminator : public MixedModeMutator { static auto op_stateful = Op::GetAttrMap("TOpIsStateful"); Expr new_expr = post; const CallNode* new_call = new_expr.as(); - CHECK(new_call); + ICHECK(new_call); const OpNode* op = new_call->op.as(); StructuralEqual attrs_equal; @@ -83,7 +83,7 @@ class CommonSubexprEliminator : public MixedModeMutator { Expr Rewrite_(const TupleGetItemNode* op, const Expr& post) final { Expr new_expr = post; const TupleGetItemNode* new_tuple_item = new_expr.as(); - CHECK(new_tuple_item); + ICHECK(new_tuple_item); if (fskip_ != nullptr && fskip_(new_expr)) { return new_expr; diff --git a/src/relay/transforms/eta_expand.cc b/src/relay/transforms/eta_expand.cc index 42718eec9179..4023c9dafef4 100644 --- 
a/src/relay/transforms/eta_expand.cc +++ b/src/relay/transforms/eta_expand.cc @@ -62,7 +62,7 @@ class EtaExpander : public ExprMutator { type_var_replacer_(TypeVarReplacer()), expand_constructor_(expand_constructor), expand_global_var_(expand_global_var) { - CHECK(expand_constructor || expand_global_var) << "must expand at least one language feature"; + ICHECK(expand_constructor || expand_global_var) << "must expand at least one language feature"; } IRModule Expand() { diff --git a/src/relay/transforms/fold_constant.cc b/src/relay/transforms/fold_constant.cc index 4a739ddba40f..48af31f9a11f 100644 --- a/src/relay/transforms/fold_constant.cc +++ b/src/relay/transforms/fold_constant.cc @@ -110,7 +110,7 @@ class ConstantFolder : public MixedModeMutator { bool inside_primitive = false; Expr VisitExpr_(const FunctionNode* op) final { if (op->HasNonzeroAttr(attr::kPrimitive)) { - CHECK_EQ(inside_primitive, false); + ICHECK_EQ(inside_primitive, false); inside_primitive = true; auto ret = ExprMutator::VisitExpr_(op); inside_primitive = false; @@ -253,7 +253,7 @@ class ConstantFolder : public MixedModeMutator { Expr EvaluateShapeOf(Expr expr, Array args, Attrs attrs) { Expr input = args[0]; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); tvm::Array ishape; if (auto opt = GetConstantShape(input)) { @@ -271,7 +271,7 @@ class ConstantFolder : public MixedModeMutator { if (ishape.size() == 0) { value = runtime::NDArray::Empty({}, cdtype, ctx); } else { - CHECK_NE(ishape.size(), 0); + ICHECK_NE(ishape.size(), 0); std::vector cshape = {static_cast(ishape.size())}; value = runtime::NDArray::Empty(cshape, cdtype, ctx); int32_t* dims = static_cast(value->data); @@ -300,7 +300,7 @@ class ConstantFolder : public MixedModeMutator { Expr EvaluateNdarraySize(Expr expr, Array args, Attrs attrs) { Expr input = args[0]; const auto* param = attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); tvm::Array ishape; if (auto opt = GetConstantShape(input)) { diff --git a/src/relay/transforms/fold_scale_axis.cc b/src/relay/transforms/fold_scale_axis.cc index e4c924dad1e8..23be70c1e442 100644 --- a/src/relay/transforms/fold_scale_axis.cc +++ b/src/relay/transforms/fold_scale_axis.cc @@ -182,7 +182,7 @@ class ScaledExprNode : public TempExprNode { Expr scale = NullValue(); Expr Realize() const final { - CHECK(!axes.defined()) << "outstanding scale"; + ICHECK(!axes.defined()) << "outstanding scale"; return value; } @@ -268,7 +268,7 @@ class ForwardPrep : private ExprVisitor { auto f = fprep.get(call->op, nullptr); if (f != nullptr) { Array in_messages = f(GetRef(call), out_message); - CHECK_EQ(in_messages.size(), call->args.size()); + ICHECK_EQ(in_messages.size(), call->args.size()); for (size_t i = 0; i < call->args.size(); ++i) { this->Update(call->args[i], in_messages[i]); } @@ -400,8 +400,8 @@ Expr AddSubForwardRewrite(const Call& ref_call, const Array& new_args, auto rnode = make_object(); if (slhs != nullptr) { - CHECK(srhs == nullptr); - CHECK(MatchBroadcastToLeftAxes(tlhs, trhs, slhs->axes)); + ICHECK(srhs == nullptr); + ICHECK(MatchBroadcastToLeftAxes(tlhs, trhs, slhs->axes)); Expr scale = ReshapeOrExpandToMatchAxis(slhs->scale, tlhs->shape, slhs->axes); if (!scale.defined()) { return Expr(); @@ -411,8 +411,8 @@ Expr AddSubForwardRewrite(const Call& ref_call, const Array& new_args, rnode->scale = slhs->scale; rnode->axes = slhs->axes; } else { - CHECK(srhs != nullptr); - CHECK(MatchBroadcastToLeftAxes(trhs, tlhs, srhs->axes)); + ICHECK(srhs != nullptr); + 
ICHECK(MatchBroadcastToLeftAxes(trhs, tlhs, srhs->axes)); Expr scale = ReshapeOrExpandToMatchAxis(srhs->scale, trhs->shape, srhs->axes); if (!scale.defined()) { return Expr(); @@ -441,12 +441,12 @@ Expr MultiplyForwardRewrite(const Call& ref_call, const Array& new_args, const Message& message) { if (!message.defined()) return Expr(); const auto& expected_out_axes = message->axes; - CHECK(expected_out_axes.defined() && expected_out_axes.size()); + ICHECK(expected_out_axes.defined() && expected_out_axes.size()); // TODO(tvm-team) allow same axes accumulation // not as important because it is less common in nn. const auto* slhs = new_args[0].as(); const auto* srhs = new_args[1].as(); - CHECK(!slhs && !srhs); + ICHECK(!slhs && !srhs); const auto* tlhs = ref_call->args[0]->type_as(); const auto* trhs = ref_call->args[1]->type_as(); @@ -480,13 +480,13 @@ Array Conv2DForwardPrep(const Call& call, const Message& out_message) { // TODO(tvm-team) support general data layout // by transforming weight const auto* param = call->attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout data_layout(param->data_layout); Layout kernel_layout(param->kernel_layout); int c_big_axis = data_layout.IndexOf(LayoutAxis::Get('C')); int c_small_axis = data_layout.IndexOf(LayoutAxis::Get('c')); - CHECK_GE(c_big_axis, 0); + ICHECK_GE(c_big_axis, 0); Message none = NullValue(); // For now, we only support simple pattern (no folded weight/data) // More general layout can be supported under the current framework. @@ -520,11 +520,11 @@ Expr Conv2DForwardRewrite(const Call& ref_call, const Array& new_args, if (sdata == nullptr) return Expr(); if (sweight != nullptr) return Expr(); const auto* param = ref_call->attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout data_layout(param->data_layout); Layout kernel_layout(param->kernel_layout); int c_big_axis = data_layout.IndexOf(LayoutAxis::Get('C')); - CHECK_GE(c_big_axis, 0); + ICHECK_GE(c_big_axis, 0); int small_ko_axis = kernel_layout.IndexOf(LayoutAxis::Get('o')); int small_ki_axis = kernel_layout.IndexOf(LayoutAxis::Get('i')); int big_ki_axis = kernel_layout.IndexOf(LayoutAxis::Get('I')); @@ -532,11 +532,11 @@ Expr Conv2DForwardRewrite(const Call& ref_call, const Array& new_args, bool is_simple = (small_ko_axis < 0 && small_ki_axis < 0 && big_ki_axis >= 0); bool is_blocking = (small_ko_axis >= 0 && small_ki_axis >= 0 && big_ki_axis >= 0); - CHECK(is_simple || is_blocking); + ICHECK(is_simple || is_blocking); // Check it must be depthwise or full conv2d. bool is_depthwise_conv2d = IsDepthwiseConv2D(ref_call, param, kernel_layout); - CHECK(param->groups == 1 || is_depthwise_conv2d); + ICHECK(param->groups == 1 || is_depthwise_conv2d); Expr weight = new_args[1]; @@ -628,7 +628,7 @@ class BackwardPrep : private ExprVisitor { auto f = fprep.get(call->op, nullptr); if (f == nullptr) return; auto rit = ref_counter_.find(call); - CHECK(rit != ref_counter_.end()); + ICHECK(rit != ref_counter_.end()); // We only allow propagation of scale backward // if the expression is only referred by a single parent. 
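// The hunk above ends in BackwardPrep, which only allows a scale to fold
// backward when the producing expression has exactly one consumer: folding
// into a node with two parents would change the value the other parent sees.
// A minimal standalone sketch of that reference-counting guard; Node,
// CountRefs, and CanPropagateBackward are illustrative names, not TVM types.
#include <unordered_map>
#include <vector>

struct Node {
  std::vector<Node*> inputs;  // hypothetical expression node in a DAG
};

// Count how many consumers reference each node (mirrors the ref_counter_ idea).
std::unordered_map<const Node*, int> CountRefs(const Node* root) {
  std::unordered_map<const Node*, int> refs;
  std::unordered_map<const Node*, bool> seen;
  std::vector<const Node*> stack{root};
  while (!stack.empty()) {
    const Node* n = stack.back();
    stack.pop_back();
    for (const Node* in : n->inputs) {
      ++refs[in];
      if (!seen[in]) { seen[in] = true; stack.push_back(in); }
    }
  }
  return refs;
}

bool CanPropagateBackward(const Node* n,
                          const std::unordered_map<const Node*, int>& refs) {
  auto it = refs.find(n);
  return it != refs.end() && it->second == 1;  // single parent only
}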
if (rit->second != 1) return; @@ -668,7 +668,7 @@ class BackwardTransformerNode : public Object, private ExprMutator { if (const CallNode* call_node = expr.as()) { return Transform(call_node, message, scale); } else { - CHECK(!message.defined()) << "outstanding scale"; + ICHECK(!message.defined()) << "outstanding scale"; return ExprMutator::VisitExpr(expr); } } @@ -738,7 +738,7 @@ Expr BackwardTransformerNode::Transform(const CallNode* call_node, Message messa memo_[call] = new_expr; return new_expr; } else { - CHECK(!message.defined()) << "outstanding scale"; + ICHECK(!message.defined()) << "outstanding scale"; return NormalCallTransform(call_node); } } @@ -807,13 +807,13 @@ Expr AddSubBackwardTransform(const Call& call, const Message& message, const Exp StructuralEqual equal; if (lhs_message.defined() && rhs_message.defined()) { - CHECK(equal(lhs_message->axes, rhs_message->axes)); - CHECK(equal(message->axes, lhs_message->axes)); + ICHECK(equal(lhs_message->axes, rhs_message->axes)); + ICHECK(equal(message->axes, lhs_message->axes)); Expr lhs = transformer->Transform(call->args[0], message, scale); Expr rhs = transformer->Transform(call->args[1], message, scale); return Call(call->op, {lhs, rhs}, call->attrs, call->type_args); } else if (lhs_message.defined()) { - CHECK(equal(message->axes, lhs_message->axes)); + ICHECK(equal(message->axes, lhs_message->axes)); Expr lhs = transformer->Transform(call->args[0], message, scale); Expr rhs = transformer->Transform(call->args[1], NullValue(), NullValue()); Expr rhs_scale = ReshapeOrExpandToMatchAxis(scale, tlhs->shape, message->axes); @@ -823,7 +823,7 @@ Expr AddSubBackwardTransform(const Call& call, const Message& message, const Exp rhs = Multiply(rhs, rhs_scale); return Call(call->op, {lhs, rhs}, call->attrs, call->type_args); } else if (rhs_message.defined()) { - CHECK(equal(message->axes, rhs_message->axes)); + ICHECK(equal(message->axes, rhs_message->axes)); Expr lhs = transformer->Transform(call->args[0], NullValue(), NullValue()); Expr rhs = transformer->Transform(call->args[1], message, scale); Expr lhs_scale = ReshapeOrExpandToMatchAxis(scale, trhs->shape, message->axes); @@ -852,13 +852,13 @@ RELAY_REGISTER_OP("subtract") // Multiply produces the scale-axis pair. Expr MultiplyBackwardTransform(const Call& call, const Message& message, const Expr& scale, const BackwardTransformer& transformer) { - CHECK(!message.defined()) << "outstanding scale"; + ICHECK(!message.defined()) << "outstanding scale"; const auto* tlhs = call->args[0]->type_as(); const auto* trhs = call->args[1]->type_as(); Message lhs_message = transformer->GetMessage(call->args[0]); Message rhs_message = transformer->GetMessage(call->args[1]); if (lhs_message.defined()) { - CHECK(lhs_message->axes.defined() && lhs_message->axes.size()); + ICHECK(lhs_message->axes.defined() && lhs_message->axes.size()); // NOTE we won't recursively call mutating on scale part. // since there won't be scale chance within scale part. 
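// The multiply transform just above only fires when the scale operand
// broadcasts onto the folded axes (the MatchBroadcastToLeftAxes calls that
// follow). A standalone sketch of that shape condition under NumPy-style
// right-aligned broadcasting; the function and its parameters are
// illustrative, not TVM's actual implementation.
#include <cstdint>
#include <vector>

bool BroadcastsOntoAxes(const std::vector<int64_t>& shape,
                        const std::vector<int>& axes,
                        const std::vector<int64_t>& scale_shape) {
  int offset = static_cast<int>(shape.size()) - static_cast<int>(scale_shape.size());
  if (offset < 0) return false;  // scale cannot have more dims than the data
  for (int i = 0; i < static_cast<int>(scale_shape.size()); ++i) {
    if (scale_shape[i] == 1) continue;  // broadcast dimension, always fine
    int axis = offset + i;
    bool listed = false;
    for (int a : axes) listed |= (a == axis);
    // every non-unit dim must be one of the folded axes with matching extent
    if (!listed || shape[axis] != scale_shape[i]) return false;
  }
  return true;
}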
Expr rhs = call->args[1]; @@ -867,7 +867,7 @@ Expr MultiplyBackwardTransform(const Call& call, const Message& message, const E return transformer->Transform(call->args[0], lhs_message, rhs); } } else if (rhs_message.defined()) { - CHECK(rhs_message->axes.defined() && rhs_message->axes.size()); + ICHECK(rhs_message->axes.defined() && rhs_message->axes.size()); Expr lhs = call->args[0]; if (MatchBroadcastToLeftAxes(trhs, tlhs, rhs_message->axes, &lhs) && (!rhs_message->require_positive || IsAllPositiveConstant(lhs))) { @@ -884,13 +884,13 @@ RELAY_REGISTER_OP("multiply") // Conv2D send out requirement of axis folding. Message Conv2DBackwardPrep(const Call& call, const Array& in_messages) { const auto* param = call->attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout kernel_layout(param->kernel_layout); Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); int c_big_axis = out_layout.IndexOf(LayoutAxis::Get('C')); int c_small_axis = out_layout.IndexOf(LayoutAxis::Get('c')); - CHECK_GE(c_big_axis, 0); + ICHECK_GE(c_big_axis, 0); // For now, we only support simple pattern (no folded weight/data) // More general layout can be supported under the current framework. // By using a unified layout transformation. @@ -921,11 +921,11 @@ Expr Conv2DBackwardTransform(const Call& call, const Message& message, const Exp return transformer->NormalCallTransform(call.operator->()); } const auto* param = call->attrs.as(); - CHECK(param != nullptr); + ICHECK(param != nullptr); Layout kernel_layout(param->kernel_layout); Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout); int c_big_axis = out_layout.IndexOf(LayoutAxis::Get('C')); - CHECK_GE(c_big_axis, 0); + ICHECK_GE(c_big_axis, 0); // For now, we only support simple pattern (no folded weight/data) // TODO(tvm-team) support general data layout int small_ko_axis = kernel_layout.IndexOf(LayoutAxis::Get('o')); @@ -934,10 +934,10 @@ Expr Conv2DBackwardTransform(const Call& call, const Message& message, const Exp int big_ko_axis = kernel_layout.IndexOf(LayoutAxis::Get('O')); // Check it must be depthwise or full conv2d. 
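// The conv2d hunks around here repeatedly probe layout strings for the
// channel axis: an upper-case letter is a primal axis ('C'), a lower-case
// letter its blocked sub-axis ('c' in NCHW4c), and folding along channels is
// only "simple" when the channel axis is not blocked. A tiny illustrative
// sketch of that probing over plain strings, not TVM's Layout class:
#include <string>

int AxisIndex(const std::string& layout, char axis) {
  auto pos = layout.find(axis);
  return pos == std::string::npos ? -1 : static_cast<int>(pos);
}

bool ChannelFoldIsSimple(const std::string& data_layout) {
  return AxisIndex(data_layout, 'C') >= 0 && AxisIndex(data_layout, 'c') < 0;
}
// ChannelFoldIsSimple("NCHW")   -> true
// ChannelFoldIsSimple("NCHW4c") -> false (blocked channel axis)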
bool is_depthwise_conv2d = IsDepthwiseConv2D(call, param, kernel_layout); - CHECK(param->groups == 1 || is_depthwise_conv2d); + ICHECK(param->groups == 1 || is_depthwise_conv2d); bool is_simple = (small_ko_axis < 0 && small_ki_axis < 0 && big_ki_axis >= 0); bool is_blocking = (small_ko_axis >= 0 && small_ki_axis >= 0 && big_ki_axis >= 0); - CHECK(is_simple || is_blocking); + ICHECK(is_simple || is_blocking); Expr data = transformer->Transform(call->args[0], NullValue(), NullValue()); Expr weight = transformer->Transform(call->args[1], NullValue(), NullValue()); diff --git a/src/relay/transforms/forward_rewrite.cc b/src/relay/transforms/forward_rewrite.cc index 58396256105b..be2d37477eb6 100644 --- a/src/relay/transforms/forward_rewrite.cc +++ b/src/relay/transforms/forward_rewrite.cc @@ -89,7 +89,7 @@ class ForwardRewriter : private MixedModeMutator { if (fmulti_ref_trigger_ != nullptr) { Expr ret = post; auto it = ref_counter_.find(expr.get()); - CHECK(it != ref_counter_.end()); + ICHECK(it != ref_counter_.end()); if (it->second > 1) { ret = fmulti_ref_trigger_(ret); } @@ -136,7 +136,7 @@ class ForwardRewriter : private MixedModeMutator { if (rewrite_func_) { frewrite = *rewrite_func_; } else { - CHECK(rewrite_map_); + ICHECK(rewrite_map_); frewrite = rewrite_map_->get(call_node->op, nullptr); } const auto* post_node = post.as(); diff --git a/src/relay/transforms/fuse_ops.cc b/src/relay/transforms/fuse_ops.cc index bc6335a539af..8023305f3f64 100644 --- a/src/relay/transforms/fuse_ops.cc +++ b/src/relay/transforms/fuse_ops.cc @@ -188,9 +188,9 @@ class IndexedForwardGraph::Creator : private ExprVisitor { void AddNode(const tvm::Object* key) { auto it = graph_.node_map.find(key); - CHECK(it != graph_.node_map.end()) << "Cannot find node " << GetRef(key); + ICHECK(it != graph_.node_map.end()) << "Cannot find node " << GetRef(key); IndexedForwardGraph::Node* node = it->second; - CHECK(node->ref == nullptr); + ICHECK(node->ref == nullptr); node->ref = key; node->index = graph_.post_dfs_order.size(); graph_.post_dfs_order.push_back(node); @@ -226,7 +226,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor { } void VisitExpr_(const CallNode* call) final { - CHECK(graph_.node_map.count(call)); + ICHECK(graph_.node_map.count(call)); Node* node = graph_.node_map.at(call); static auto fpattern = Op::GetAttrMap("TOpPattern"); // Now we set the pattern of this call. @@ -270,7 +270,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor { } void VisitExpr_(const TupleNode* op) final { - CHECK(graph_.node_map.count(op)); + ICHECK(graph_.node_map.count(op)); Node* tuple_node = graph_.node_map.at(op); tuple_node->pattern = kTuple; for (const Expr& field : op->fields) { @@ -286,7 +286,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor { void VisitExpr_(const TupleGetItemNode* op) final { auto tuple_type = op->tuple->checked_type().as(); - CHECK(tuple_type); + ICHECK(tuple_type); // When TVM lowers a fused function, it expects all arguments to be a Tensor or // a tuple containing only Tensors. But this tuple may contain a reference or // another tuple. 
To avoid modifying codegen logic, we do not allow fusing through this node @@ -302,7 +302,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor { if (has_non_tensor) { this->Update(op->tuple, nullptr, kOpaque); } else { - CHECK(graph_.node_map.count(op)); + ICHECK(graph_.node_map.count(op)); Node* node = graph_.node_map.at(op); node->pattern = kInjective; this->Update(op->tuple, node, kInjective); @@ -443,9 +443,9 @@ class DominatorTree { } auto get_node = [&](const IndexedForwardGraph::Edge& edge) { size_t oindex = edge.node->index; - CHECK_LT(oindex, nodes.size()); + ICHECK_LT(oindex, nodes.size()); Node* onode = nodes[oindex]; - CHECK(onode != nullptr); + ICHECK(onode != nullptr); return onode; }; Node* parent = get_node(link->value); @@ -563,7 +563,7 @@ class GraphPartitioner { if (visited_.count(src)) return true; visited_.insert(src); Group* gnode = groups_[src->index]; - CHECK(gnode != nullptr); + ICHECK(gnode != nullptr); gnode = gnode->FindRoot(); if (!fcond(gnode->pattern, src == sink)) return false; if (src == sink) return true; @@ -586,9 +586,9 @@ class GraphPartitioner { */ template bool CheckPath(IndexedForwardGraph::Node* src, IndexedForwardGraph::Node* sink, F fcond) { - CHECK(!src->extern_ref); + ICHECK(!src->extern_ref); visited_.clear(); - CHECK(src != sink); + ICHECK(src != sink); for (auto link = src->outputs.head; link != nullptr; link = link->next) { if (!CheckPath_(link->value.node, sink, fcond)) return false; } @@ -616,7 +616,7 @@ class GraphPartitioner { child->parent = parent; // update anchor ref and pattern if (child->anchor_ref != nullptr) { - CHECK(parent->anchor_ref == nullptr); + ICHECK(parent->anchor_ref == nullptr); parent->anchor_ref = child->anchor_ref; parent->pattern = CombinePattern(child->pattern, parent->pattern); } @@ -627,7 +627,7 @@ class GraphPartitioner { if (visited_.count(src)) return; visited_.insert(src); Group* gnode = groups_[src->index]; - CHECK(gnode != nullptr); + ICHECK(gnode != nullptr); // merge the current group to the parent if possible. 
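// The Group bookkeeping in these fuse_ops hunks (FindRoot, MergeFromTo,
// num_nodes) is a classic union-find over fusion groups. A self-contained
// miniature of the pattern, with path compression added for clarity; the
// anchor_ref/pattern transfer of the real pass is omitted here.
struct Group {
  Group* parent = nullptr;  // nullptr means this group is its own root
  int num_nodes = 1;

  Group* FindRoot() {
    Group* root = this;
    while (root->parent != nullptr) root = root->parent;
    for (Group* g = this; g != root;) {  // path compression
      Group* next = g->parent;
      g->parent = root;
      g = next;
    }
    return root;
  }
};

void MergeFromTo(Group* child, Group* parent) {
  child = child->FindRoot();
  parent = parent->FindRoot();
  if (child == parent) return;  // already fused together
  parent->num_nodes += child->num_nodes;
  child->parent = parent;
}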
MergeFromTo(gnode, target); for (auto link = src->outputs.head; link != nullptr; link = link->next) { @@ -643,7 +643,7 @@ class GraphPartitioner { void CommitFuse(IndexedForwardGraph::Node* src, IndexedForwardGraph::Node* sink) { Group* target = groups_[sink->index]; visited_.clear(); - CHECK(src != sink); + ICHECK(src != sink); CommitFuse_(src, sink, target); } @@ -651,7 +651,7 @@ class GraphPartitioner { if (src == sink || visited_.count(src)) return 0; visited_.insert(src); Group* gnode = groups_[src->index]; - CHECK(gnode != nullptr); + ICHECK(gnode != nullptr); auto sum = gnode->num_nodes; for (auto link = src->outputs.head; link != nullptr; link = link->next) { sum += CountNodesUptoSink_(link->value.node, sink); @@ -669,7 +669,7 @@ class GraphPartitioner { IndexedForwardGraph::Node* dom_parent) { Group* target = groups_[dom_parent->index]; visited_.clear(); - CHECK(child != dom_parent); + ICHECK(child != dom_parent); return target->FindRoot()->num_nodes + CountNodesUptoSink_(child, dom_parent); } @@ -696,12 +696,12 @@ class GraphPartitioner { auto* graph_node = graph.post_dfs_order[nid]; auto* dom_node = post_dom_tree.nodes[nid]; Group* group_node = groups_[nid]; - CHECK(group_node != nullptr); + ICHECK(group_node != nullptr); // no actions for opaque nodes if (group_node->pattern == kOpaque) continue; // no actions needed if the current node have no dominator if (dom_node->parent == nullptr) continue; - CHECK(!graph_node->extern_ref); + ICHECK(!graph_node->extern_ref); size_t dom_parent_gindex = dom_node->parent->gnode->index; // refuse the fusion if too many ops are going to be fused together @@ -740,7 +740,7 @@ class GraphPartitioner { // Path for OutEWiseFusable: conv2d // Check if the dominator relation is elemwise. if (dom_node->parent != nullptr && dom_node->pattern == kElemWise) { - CHECK(dom_node->parent->gnode != nullptr); + ICHECK(dom_node->parent->gnode != nullptr); // The fuse can be executed if all the intermediate ops are still broadcast. auto fcond = [](OpPatternKind kind, bool is_sink) { return kind <= kBroadcast; }; if (CheckPath(graph_node, dom_node->parent->gnode, fcond)) { @@ -778,7 +778,7 @@ class GraphPartitioner { } } else { // do nothing. - CHECK(group_node->pattern == kCommReduce); + ICHECK(group_node->pattern == kCommReduce); } } } @@ -805,7 +805,7 @@ class FuseMutator : private ExprMutator { auto graph = IndexedForwardGraph::Create(&arena_, body); auto groups = GraphPartitioner(&arena_, fuse_opt_level, max_fuse_depth).Partition(graph); for (size_t nid = 0; nid < graph.post_dfs_order.size(); ++nid) { - CHECK(graph.post_dfs_order[nid]->ref != nullptr); + ICHECK(graph.post_dfs_order[nid]->ref != nullptr); gmap_[graph.post_dfs_order[nid]->ref] = groups[nid]; } // The following line can be used for debug. @@ -863,7 +863,7 @@ class FuseMutator : private ExprMutator { // If it is a primitive op call // then we must have a group assignment for it already. 
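// Checks like the gmap_ lookup that follows guard compiler-internal
// invariants rather than user input, which is exactly the class of assertion
// this diff renames to ICHECK. A minimal standalone sketch of what such a
// macro can look like; MY_ICHECK and FatalStream are hypothetical, and TVM's
// own logging header provides the real definition.
#include <cstdlib>
#include <iostream>
#include <sstream>

struct FatalStream {
  std::ostringstream os_;
  FatalStream(const char* file, int line) { os_ << file << ":" << line << ": "; }
  ~FatalStream() {  // fires after the message has been streamed in
    std::cerr << os_.str() << std::endl;
    std::abort();
  }
  std::ostringstream& stream() { return os_; }
};

// if/else form avoids the dangling-else hazard of a bare `if (!(cond))`.
#define MY_ICHECK(cond)                                   \
  if (cond) {                                             \
  } else                                                  \
    FatalStream(__FILE__, __LINE__).stream()              \
        << "InternalError: Check failed: (" #cond ") is false: "

// Usage: MY_ICHECK(gmap.count(key)) << "no group assigned for node";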
- CHECK(gmap_.count(call)); + ICHECK(gmap_.count(call)); if (call->op == stop_fusion_op) { return ExprMutator::VisitExpr(call->args[0]); } diff --git a/src/relay/transforms/gradient.cc b/src/relay/transforms/gradient.cc index 1722c90069cb..9441f8af5d27 100644 --- a/src/relay/transforms/gradient.cc +++ b/src/relay/transforms/gradient.cc @@ -74,7 +74,7 @@ Expr FirstOrderGradient(const Expr& e, const Optional& mod); Type WithGradientType(const Type& t) { // TODO(@M.K.): stricter checking auto ty = t.as(); - CHECK(ty) << "input should be a function"; + ICHECK(ty) << "input should be a function"; return FuncType(ty->arg_types, TupleType({ty->ret_type, TupleType(ty->arg_types)}), {}, {}); } @@ -102,7 +102,7 @@ struct ADValueNode { template T& get() { auto ret = dynamic_cast(this); - CHECK(ret) << "cannot downcast"; + ICHECK(ret) << "cannot downcast"; return *ret; } }; @@ -183,7 +183,7 @@ struct FirstOrderReverseAD : ExprFunctor { ADValue VisitExpr_(const OpNode* op) final { Op op_ref = GetRef(op); - CHECK(rev_map.count(op_ref)) << op->name << " does not have reverse mode defined"; + ICHECK(rev_map.count(op_ref)) << op->name << " does not have reverse mode defined"; return std::make_shared( [this, op_ref](const Type& orig_type, const std::vector& args, const Attrs& attrs, const tvm::Array& type_args) { @@ -196,7 +196,7 @@ struct FirstOrderReverseAD : ExprFunctor { auto ret = std::make_shared(ll, orig); backprop_actions.push_back([this, args, orig, ret, op_ref](LetList* ll) { tvm::Array rev = rev_map[op_ref](orig, ret->reverse); - CHECK(args.size() == rev.size()); + ICHECK(args.size() == rev.size()); for (size_t i = 0; i < args.size(); ++i) { args[i]->get().reverse = ll->Push(Add(args[i]->get().reverse, rev[i])); @@ -271,7 +271,7 @@ struct FirstOrderReverseAD : ExprFunctor { return std::make_shared( [this, f](const Type& orig_type, const std::vector& args, const Attrs& attrs, const tvm::Array& type_args) { - CHECK_EQ(f->params.size(), args.size()); + ICHECK_EQ(f->params.size(), args.size()); for (size_t i = 0; i < f->params.size(); ++i) { env[f->params[i]] = args[i]; } @@ -305,8 +305,8 @@ Expr FirstOrderGradient(const Expr& re, const Optional& mod) { // order case. auto e = DeGlobal(mod, re); auto f = e.as(); - CHECK(f) << "FOWithGradient expects its argument to be a function: " << f; - CHECK(f->type_params.size() == 0) << "no polymorphism supported for now"; + ICHECK(f) << "FOWithGradient expects its argument to be a function: " << f; + ICHECK(f->type_params.size() == 0) << "no polymorphism supported for now"; // We will then build a sequence of lets which implement reverse mode. Expr body = LetList::With([&](LetList* ll) { @@ -364,7 +364,7 @@ Type ReverseType(const Type& t) { return ReverseADType()(t); } Expr LiftTensor(const std::function& f, const std::function& tf, const Type& forward_type, const Expr& e, LetList* ll) { - CHECK(IsAtomic(e)) << e; + ICHECK(IsAtomic(e)) << e; if (forward_type.as()) { auto ret = ll->Push(f(e)); ret->checked_type_ = tf(forward_type); @@ -390,8 +390,8 @@ Expr LiftTensor(const std::function& f, * by stitching the references in the AD values. 
*/ void TransferGrads(const Type& forward_type, const Expr& from, const Expr& to, LetList* ll) { - CHECK(IsAtomic(from)) << from; - CHECK(IsAtomic(to)) << to; + ICHECK(IsAtomic(from)) << from; + ICHECK(IsAtomic(to)) << to; if (forward_type.as()) { auto from_ref = TupleGetItem(from, 1); auto to_ref = TupleGetItem(to, 1); @@ -487,9 +487,9 @@ struct ReverseAD : ExprMutator { Expr VisitCheckpoint(const CallNode* call) { const OpNode* op_node = call->op.as(); - CHECK(op_node) << "expected op in call"; + ICHECK(op_node) << "expected op in call"; Op op_ref = GetRef(op_node); - CHECK(op_ref->name == "annotation.checkpoint") << "expected checkpoint annotation"; + ICHECK(op_ref->name == "annotation.checkpoint") << "expected checkpoint annotation"; auto x = call->args[0]; return LetList::With([&](LetList* ll) { auto x_var = ll->Push(Remap(x)); @@ -518,7 +518,7 @@ struct ReverseAD : ExprMutator { return VisitCheckpoint(call); } - CHECK(rev_map.count(op_ref)) << op_node->name << " does not have reverse mode defined"; + ICHECK(rev_map.count(op_ref)) << op_node->name << " does not have reverse mode defined"; return LetList::With([&](LetList* ll) { std::vector args; for (const auto& arg : call->args) { @@ -536,7 +536,7 @@ struct ReverseAD : ExprMutator { auto bpv = ll->Push(RefRead(bp)); Expr nbp_body = LetList::With([&](LetList* ll) { tvm::Array rev = rev_map[op_ref](orig, GetGrad(call->checked_type(), ret, ll)); - CHECK(args.size() == rev.size()); + ICHECK(args.size() == rev.size()); for (size_t i = 0; i < args.size(); ++i) { UpdateGrad(call->args[i]->checked_type(), args[i], rev[i], ll); } @@ -585,7 +585,7 @@ struct ReverseAD : ExprMutator { Expr VisitExpr_(const GlobalVarNode* op) final { // todo: concatenating string to add attribute seems like a brittle hack. // maybe get module indexed by a rose tree of string? 
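// The GlobalVar hunk below caches a derived "_grad" global per original
// global so each function is differentiated once; the TODO above it flags the
// string-concatenation naming as brittle. A standalone sketch of that
// cache-by-name pattern; GradCache and GradOf are illustrative names only.
#include <string>
#include <unordered_map>

struct GradCache {
  std::unordered_map<std::string, std::string> ad_gvars;  // orig -> grad name

  std::string GradOf(const std::string& name) {
    auto it = ad_gvars.find(name);
    if (it != ad_gvars.end()) return it->second;  // already differentiated
    std::string grad = name + "_grad";  // the brittle suffix the TODO mentions
    ad_gvars.emplace(name, grad);
    // a real pass would also synthesize the gradient function and register it
    return grad;
  }
};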
- CHECK(mod.defined()); + ICHECK(mod.defined()); auto orig_gv = GetRef(op); if (ad_gvars->count(orig_gv) == 0) { GlobalVar gv(op->name_hint + "_grad"); @@ -653,12 +653,12 @@ Expr Gradient(const Expr& re, const Optional& mod) { } auto e = DeGlobal(mod, re); auto f = e.as(); - CHECK(f) << "input need to be a function"; - CHECK(f->type_params.size() == 0) << "no polymorphism supported for now"; + ICHECK(f) << "input need to be a function"; + ICHECK(f->type_params.size() == 0) << "no polymorphism supported for now"; for (const auto& p : f->params) { - CHECK(p->checked_type().as()) << "input parameters need to be tensor"; + ICHECK(p->checked_type().as()) << "input parameters need to be tensor"; } - CHECK(!MissingGrad(e)) << "input has operators with missing gradients"; + ICHECK(!MissingGrad(e)) << "input has operators with missing gradients"; Expr body = LetList::With([&](LetList* ll) { Var bp = ll->Push(BPEmpty(), bpt); Expr rev = ReverseAD(mod, bp, std::make_shared(), @@ -676,7 +676,7 @@ Expr Gradient(const Expr& re, const Optional& mod) { if (t.as()) { ll->Push(RefWrite(GetField(e, 1), OnesLike(GetField(e, 0)))); } else if (auto tt = t.as()) { - CHECK_GT(tt->fields.size(), 0); + ICHECK_GT(tt->fields.size(), 0); init_grad(ll->Push(GetField(e, 0)), tt->fields[0]); } else { LOG(FATAL) << "unhandled type " << t; diff --git a/src/relay/transforms/infer_layout_utils.h b/src/relay/transforms/infer_layout_utils.h index 3965b0a6a78b..7edb07ce71ce 100644 --- a/src/relay/transforms/infer_layout_utils.h +++ b/src/relay/transforms/infer_layout_utils.h @@ -108,7 +108,7 @@ inline Array> ElemwiseArbitraryLayout(const Attrs& attrs, Layout ret; if (new_in_layouts.defined()) { - CHECK_GE(new_in_layouts.size(), 1); + ICHECK_GE(new_in_layouts.size(), 1); ret = new_in_layouts[0]; } else { for (size_t i = 0; i < old_in_layouts.size(); ++i) { @@ -130,7 +130,7 @@ inline Array> BinaryBroadcastLayout(const Attrs& attrs, Array layouts; Array> old_in_shapes; for (auto old_in_t : old_in_types) { - CHECK(old_in_t.as()); + ICHECK(old_in_t.as()); old_in_shapes.push_back(old_in_t.as()->shape); } @@ -217,7 +217,7 @@ static inline std::tuple, Array, bool> InferCorrectLayouts if (finfer_layout.count(op)) { Array> inferred_layouts; inferred_layouts = finfer_layout[op](call->attrs, new_in_layouts, old_in_layouts, old_in_types); - CHECK_EQ(inferred_layouts.size(), 2) + ICHECK_EQ(inferred_layouts.size(), 2) << "FInferCorrectLayout should return an array with size of 2"; for (auto x : inferred_layouts) { for (auto y : x) { diff --git a/src/relay/transforms/inline.cc b/src/relay/transforms/inline.cc index c9a0de44e2d4..dae34674de77 100644 --- a/src/relay/transforms/inline.cc +++ b/src/relay/transforms/inline.cc @@ -114,16 +114,16 @@ class Inliner : ExprMutator { // Make a new Relay expression to replace the callee. Expr MakeNewExpr(const GlobalVar& global, const Array& args, const Expr& callee) { - CHECK(callee->IsInstance() || callee->IsInstance()); + ICHECK(callee->IsInstance() || callee->IsInstance()); auto base_func = call_graph_->GetGlobalFunction(global); const auto* fn = base_func.as(); - CHECK(fn) << "Expected to work on a Relay function."; + ICHECK(fn) << "Expected to work on a Relay function."; auto func = Function(fn->params, fn->body, fn->ret_type, fn->type_params, fn->attrs); // Inline the function body to the caller if this function uses default // compiler, i.e. no external codegen is needed. 
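// Inlining here is substitution: bind each formal parameter to the matching
// call argument inside the callee body, which is why the parameter/argument
// counts must agree (the ICHECK_EQ below). A self-contained miniature over a
// toy expression type; Expr, Bind, and Inline are illustrative, not TVM's.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Expr;
using ExprPtr = std::shared_ptr<Expr>;
struct Expr {
  std::string var;            // non-empty: a variable reference
  std::vector<ExprPtr> kids;  // otherwise: an application node
};

ExprPtr Bind(const ExprPtr& body, const std::map<std::string, ExprPtr>& env) {
  if (!body->var.empty()) {
    auto it = env.find(body->var);
    return it != env.end() ? it->second : body;  // free vars stay untouched
  }
  auto out = std::make_shared<Expr>();
  for (const auto& k : body->kids) out->kids.push_back(Bind(k, env));
  return out;
}

ExprPtr Inline(const ExprPtr& callee_body,
               const std::vector<std::string>& params,
               const std::vector<ExprPtr>& args) {
  assert(params.size() == args.size());  // mirrors the ICHECK_EQ below
  std::map<std::string, ExprPtr> env;
  for (size_t i = 0; i < params.size(); ++i) env[params[i]] = args[i];
  return Bind(callee_body, env);
}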
if (!func->GetAttr(attr::kCompiler).defined()) { - CHECK_EQ(func->params.size(), args.size()) + ICHECK_EQ(func->params.size(), args.size()) << "Mismatch found in the number of parameters and call args"; // Bind the parameters with call args. Map bind_map; @@ -137,7 +137,7 @@ class Inliner : ExprMutator { // its body when the global var returns FuncType. return ret_type->IsInstance() ? std::move(func) : func->body; } else { - CHECK(callee->IsInstance()); + ICHECK(callee->IsInstance()); return Bind(func->body, bind_map); } } else if (const auto* call_node = callee.as()) { @@ -189,7 +189,7 @@ IRModule Inline(const IRModule& module) { if (const auto* fn = base_func.as()) { auto func = GetRef(fn); if (func->HasNonzeroAttr(attr::kInline)) { - CHECK_EQ(cgn->GetRefCount(), 0U) + ICHECK_EQ(cgn->GetRefCount(), 0U) << cgn->GetNameHint() << " is marked as inline but not inlined."; cgn->CleanCallGraphEntries(); cg->RemoveGlobalVarFromModule(cgn, /*update_call_graph*/ true); diff --git a/src/relay/transforms/lazy_gradient_init.cc b/src/relay/transforms/lazy_gradient_init.cc index de9406ec309d..079b790e74c0 100644 --- a/src/relay/transforms/lazy_gradient_init.cc +++ b/src/relay/transforms/lazy_gradient_init.cc @@ -131,8 +131,8 @@ class LazyGradientInitializer : public ExprMutator, public TypeMutator { auto* f = e.as(); auto* transformed = this->Mutate(e).as(); - CHECK(f); - CHECK(transformed); + ICHECK(f); + ICHECK(transformed); if (e.same_as(GetRef(transformed))) { return GetRef(transformed); diff --git a/src/relay/transforms/legalize.cc b/src/relay/transforms/legalize.cc index 89f59f625a8d..7daa028bbcf3 100644 --- a/src/relay/transforms/legalize.cc +++ b/src/relay/transforms/legalize.cc @@ -73,7 +73,7 @@ class Legalizer : public ExprRewriter { if (legalized_value.defined()) { // Check that the returned Expr from legalize is CallNode. const CallNode* legalized_call_node = legalized_value.as(); - CHECK(legalized_call_node) + ICHECK(legalized_call_node) << "Can only replace the original operator with another call node"; return legalized_value; } diff --git a/src/relay/transforms/let_list.h b/src/relay/transforms/let_list.h index c925dc0922a4..c75f18f6831c 100644 --- a/src/relay/transforms/let_list.h +++ b/src/relay/transforms/let_list.h @@ -64,8 +64,8 @@ class LetList { * \return a Var that hold the inserted expr. */ Var Push(Var pv, Expr expr) { - CHECK(!used_); - CHECK(WellFormed(expr)); + ICHECK(!used_); + ICHECK(WellFormed(expr)); lets_.emplace_back(std::make_pair(pv, expr)); return pv; } @@ -98,7 +98,7 @@ class LetList { * \return the wrapped expr. */ Expr Get(const Expr& body) { - CHECK(!used_); + ICHECK(!used_); Expr ret = body; for (auto rit = lets_.rbegin(); rit != lets_.rend(); ++rit) { ret = Let(std::get<0>(*rit), std::get<1>(*rit), ret); diff --git a/src/relay/transforms/merge_compiler_regions.cc b/src/relay/transforms/merge_compiler_regions.cc index 17fd44707b02..c7049bb4ee25 100644 --- a/src/relay/transforms/merge_compiler_regions.cc +++ b/src/relay/transforms/merge_compiler_regions.cc @@ -64,14 +64,14 @@ class RegionMerger : public MixedModeVisitor { // Check the region target. auto compiler_attrs = call->attrs.as(); - CHECK_EQ(region->GetTarget(), compiler_attrs->compiler); + ICHECK_EQ(region->GetTarget(), compiler_attrs->compiler); // Visit the unmerged parent regions. for (const auto& arg : region->GetInputs()) { // Region inputs must be begin annotation, and the region of // the begin annotation's argument is the parent region. 
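// The merge pass below walks each region's compiler_begin inputs to reach
// parent regions and insists every merged region carries the same codegen
// target (the compiler-attribute ICHECK_EQ). A standalone sketch of that
// same-target precondition; Region and its fields are illustrative types.
#include <string>
#include <vector>

struct Region {
  std::string target;            // e.g. a hypothetical "dnnl" or "tensorrt"
  std::vector<Region*> parents;  // regions feeding this one
};

bool CanMergeWithParents(const Region& r) {
  for (const Region* p : r.parents)
    if (p->target != r.target) return false;  // never mix codegen targets
  return true;
}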
auto begin = Downcast(arg); - CHECK_EQ(begin->op, CompilerBeginOp()); + ICHECK_EQ(begin->op, CompilerBeginOp()); auto parent_region = regions_->GetRegion(begin->args[0]); // Skip this region if it has been merged. @@ -86,7 +86,7 @@ class RegionMerger : public MixedModeVisitor { std::unordered_set mergeable_regions; for (const auto& arg : region->GetInputs()) { auto begin = Downcast(arg); - CHECK_EQ(begin->op, CompilerBeginOp()); + ICHECK_EQ(begin->op, CompilerBeginOp()); auto parent_region = regions_->GetRegion(begin->args[0]); if (parent_region.defined()) { mergeable_regions.insert(parent_region); diff --git a/src/relay/transforms/merge_composite.cc b/src/relay/transforms/merge_composite.cc index 7e7ad0e665a7..51f1387fd9ca 100644 --- a/src/relay/transforms/merge_composite.cc +++ b/src/relay/transforms/merge_composite.cc @@ -46,7 +46,7 @@ Function InferType(const Function& expr, const IRModule& m) { Expr MergeComposite(const Function& func, const Array& pattern_names, const Array& patterns, const std::vector& checks, const IRModule& m) { - CHECK_EQ(pattern_names.size(), patterns.size()); + ICHECK_EQ(pattern_names.size(), patterns.size()); Function merged_func = func; // merge the patterns one-by-one in order for (size_t i = 0; i < patterns.size(); i++) { diff --git a/src/relay/transforms/partial_eval.cc b/src/relay/transforms/partial_eval.cc index 276d093d6993..fa080a7ff22c 100644 --- a/src/relay/transforms/partial_eval.cc +++ b/src/relay/transforms/partial_eval.cc @@ -279,7 +279,7 @@ class FuelNode : public RelayNode { } /*! \brief return the new Fuel, and write (*progress | is progress made) to *progress. */ virtual Fuel Meet(const Fuel& f, bool* progress) const { - CHECK(progress); + ICHECK(progress); auto ret = Meet(f); *progress |= std::get<1>(ret); return std::get<0>(ret); @@ -295,8 +295,8 @@ struct FSeqNode : FuelNode { std::vector fuels; Fuel Meet(const Fuel& f, bool* progress) const final { auto x = f.as(); - CHECK(x); - CHECK_EQ(fuels.size(), x->fuels.size()); + ICHECK(x); + ICHECK_EQ(fuels.size(), x->fuels.size()); std::vector new_fuels; for (size_t i = 0; i < fuels.size(); ++i) { new_fuels.push_back(fuels[i]->Meet(x->fuels[i], progress)); @@ -320,7 +320,7 @@ struct FTimeNode : FuelNode { Time time; std::tuple Meet(const Fuel& f) const final { auto x = f.as(); - CHECK(x); + ICHECK(x); Time new_time = std::min(time, x->time); return std::make_tuple(MkFTime(new_time), new_time < time); } @@ -342,7 +342,7 @@ struct FTValueNode : FuelNode { size_t tvalue; std::tuple Meet(const Fuel& f) const final { auto x = f.as(); - CHECK(x); + ICHECK(x); size_t new_tvalue = std::min(tvalue, x->tvalue); return std::make_tuple(MkFTValue(new_tvalue), new_tvalue < tvalue); } @@ -401,9 +401,9 @@ class Environment { } void Insert(const Var& v, const PStatic& ps) { - CHECK(ps.defined()); - CHECK_GT(env_.size(), 0); - CHECK_EQ(env_.back().locals.count(v), 0); + ICHECK(ps.defined()); + ICHECK_GT(env_.size(), 0); + ICHECK_EQ(env_.back().locals.count(v), 0); env_.back().locals[v] = ps; } @@ -459,7 +459,7 @@ class Store { } void Insert(const SRefNode* r, const PStatic& ps) { - CHECK(r); + ICHECK(r); store_.back().store[r] = ps; } @@ -503,7 +503,7 @@ class Store { }; PStatic HasStatic(const Static& stat, const Expr& dynamic) { - CHECK(stat.defined()); + ICHECK(stat.defined()); return PStatic(make_object(stat, dynamic)); } @@ -579,8 +579,8 @@ Function AsFunc(const Expr& e) { if (e.as()) { return Downcast(e); } else if (const CallNode* c = e.as()) { - CHECK(c->op == with_funcid_op); - CHECK_EQ(c->args.size(), 
1); + ICHECK(c->op == with_funcid_op); + ICHECK_EQ(c->args.size(), 1); return AsFunc(c->args[0]); } else { LOG(FATAL) << "Unknown case"; @@ -595,20 +595,20 @@ class PartialEvaluator : public ExprFunctor PStatic VisitExpr(const Expr& e, LetList* ll) final { PStatic ret = ExprFunctor::VisitExpr(e, ll); - CHECK(IsAtomic(ret->dynamic)) << ret->dynamic; + ICHECK(IsAtomic(ret->dynamic)) << ret->dynamic; return ret; } PStatic VisitExpr(const Expr& e, LetList* ll, const Var& name) { if (const CallNode* c = e.as()) { if (c->op == with_funcid_op) { - CHECK_EQ(c->args.size(), 1); + ICHECK_EQ(c->args.size(), 1); return VisitExpr(c->args[0], ll, name); } } PStatic ret = e.as() ? VisitFunc(Downcast(e), ll, name) : VisitExpr(e, ll); - CHECK(IsAtomic(ret->dynamic)) << ret->dynamic; + ICHECK(IsAtomic(ret->dynamic)) << ret->dynamic; return ret; } @@ -639,7 +639,7 @@ class PartialEvaluator : public ExprFunctor PStatic VisitExpr_(const VarNode* op, LetList* ll) final { return env_.Lookup(GetRef(op)); } PStatic VisitGlobalVar(const GlobalVar& gv) { - CHECK(mod_.defined()); + ICHECK(mod_.defined()); if (gv_map_.count(gv) == 0) { BaseFunc base_func = mod_->Lookup(gv); if (auto* n = base_func.as()) { @@ -670,7 +670,7 @@ class PartialEvaluator : public ExprFunctor PStatic c = VisitExpr(op->cond, ll); if (c->pstatic.defined()) { NDArray cpu_array = Downcast(c->pstatic)->data.CopyTo(CPUContext()); - CHECK_EQ(DataType(cpu_array->dtype), DataType::Bool()); + ICHECK_EQ(DataType(cpu_array->dtype), DataType::Bool()); if (reinterpret_cast(cpu_array->data)[0]) { return VisitExpr(op->true_branch, ll); } else { @@ -719,7 +719,7 @@ class PartialEvaluator : public ExprFunctor PStatic VisitExpr_(const CallNode* op, LetList* ll) final { if (op->op == with_funcid_op) { - CHECK_EQ(op->args.size(), 1); + ICHECK_EQ(op->args.size(), 1); return VisitExpr(op->args[0], ll); } PStatic f = VisitExpr(op->op, ll); @@ -743,7 +743,7 @@ class PartialEvaluator : public ExprFunctor FuncId fid_; Fuel old_fuel; FuelFrame(PartialEvaluator* pe, FuncId fid, const Fuel& new_fuel) : pe_(pe), fid_(fid) { - CHECK_GT(pe_->fuel_map_.count(fid_), 0); + ICHECK_GT(pe_->fuel_map_.count(fid_), 0); old_fuel = pe_->fuel_map_[fid_]; pe_->fuel_map_[fid_] = new_fuel; } @@ -775,7 +775,7 @@ class PartialEvaluator : public ExprFunctor } Func VisitFuncStatic(const Function& func, const Expr& var) { - CHECK(IsAtomic(var)); + ICHECK(IsAtomic(var)); if (func->HasNonzeroAttr(attr::kPrimitive)) { return ConstEvaluateFunc(func); } @@ -788,8 +788,8 @@ class PartialEvaluator : public ExprFunctor return [=](const PStatic& self, const std::vector& pv, const Attrs& attrs, const tvm::Array& type_args, LetList* ll) { return env_.Extend([&]() { - CHECK_EQ(pv.size(), func->params.size()); - CHECK_GT(func_map_.count(func), 0); + ICHECK_EQ(pv.size(), func->params.size()); + ICHECK_GT(func_map_.count(func), 0); FuncId fid = func_map_.at(func); if (fuel_map_.count(fid) == 0) { fuel_map_.insert({fid, MkFTop()}); @@ -914,7 +914,7 @@ class PartialEvaluator : public ExprFunctor } Func ConstEvaluateFunc(const Expr& expr) { - CHECK_EQ(FreeVars(expr).size(), 0); + ICHECK_EQ(FreeVars(expr).size(), 0); return [=](const PStatic& self, const std::vector& pv, const Attrs& attrs, const tvm::Array& type_args, LetList* ll) { tvm::Array ns_args; @@ -1002,10 +1002,10 @@ class PartialEvaluator : public ExprFunctor MatchStatus VisitPattern_(const PatternConstructorNode* op, const PStatic& ps) final { if (ps->pstatic.defined()) { SConstructor scn = Downcast(ps->pstatic); - CHECK_NE(op->constructor->tag, -1); 
- CHECK_NE(scn->constructor->tag, -1); + ICHECK_NE(op->constructor->tag, -1); + ICHECK_NE(scn->constructor->tag, -1); if (op->constructor->tag == scn->constructor->tag) { - CHECK_EQ(op->patterns.size(), scn->fields.size()); + ICHECK_EQ(op->patterns.size(), scn->fields.size()); MatchStatus current_match_status = MatchStatus::Match; for (size_t i = 0; i < op->patterns.size(); ++i) { MatchStatus ms = VisitPattern(op->patterns[i], scn->fields[i]); @@ -1029,7 +1029,7 @@ class PartialEvaluator : public ExprFunctor MatchStatus VisitPattern_(const PatternTupleNode* op, const PStatic& ps) final { if (ps->pstatic.defined()) { STuple stn = Downcast(ps->pstatic); - CHECK_EQ(op->patterns.size(), stn->fields.size()); + ICHECK_EQ(op->patterns.size(), stn->fields.size()); MatchStatus current_match_status = MatchStatus::Match; for (size_t i = 0; i < op->patterns.size(); ++i) { MatchStatus ms = VisitPattern(op->patterns[i], stn->fields[i]); @@ -1055,7 +1055,7 @@ class PartialEvaluator : public ExprFunctor void VisitExpr_(const FunctionNode* op) final { Function f = GetRef(op); - CHECK_EQ(pe->func_map_.count(f), 0); + ICHECK_EQ(pe->func_map_.count(f), 0); pe->func_map_.insert({f, pe->func_map_.size()}); VisitExpr(f->body); } @@ -1072,13 +1072,13 @@ class PartialEvaluator : public ExprFunctor void VisitExpr_(const CallNode* op) final { if (op->op == with_funcid_op) { - CHECK_EQ(op->args.size(), 1); - CHECK(op->attrs.defined()); - CHECK(op->attrs.as()); + ICHECK_EQ(op->args.size(), 1); + ICHECK(op->attrs.defined()); + ICHECK(op->attrs.as()); Function f = AsFunc(op->args[0]); FuncId fid = op->attrs.as()->fid; if (pe->func_map_.count(f) != 0) { - CHECK_EQ(pe->func_map_.at(f), fid); + ICHECK_EQ(pe->func_map_.at(f), fid); } pe->func_map_.insert({f, fid}); } @@ -1087,7 +1087,7 @@ class PartialEvaluator : public ExprFunctor void VisitExpr_(const FunctionNode* op) final { Function f = GetRef(op); - CHECK_GT(pe->func_map_.count(f), 0); + ICHECK_GT(pe->func_map_.count(f), 0); ExprVisitor::VisitExpr_(op); } @@ -1104,7 +1104,7 @@ class PartialEvaluator : public ExprFunctor Expr VisitExpr_(const FunctionNode* op) final { Function f = GetRef(op); - CHECK_GT(pe->func_map_.count(f), 0); + ICHECK_GT(pe->func_map_.count(f), 0); return MkWithFuncId(ExprMutator::VisitExpr_(op), pe->func_map_.at(f)); } @@ -1163,7 +1163,7 @@ Expr StripWithFuncId(const Expr& e) { struct StripWithFuncIdMutator : ExprMutator, PatternMutator { Expr VisitExpr_(const CallNode* op) final { if (op->op == with_funcid_op) { - CHECK_EQ(op->args.size(), 1); + ICHECK_EQ(op->args.size(), 1); return VisitExpr(op->args[0]); } else { return ExprMutator::VisitExpr_(op); diff --git a/src/relay/transforms/partition_graph.cc b/src/relay/transforms/partition_graph.cc index 08d26d76ee2d..75bc46387cc6 100644 --- a/src/relay/transforms/partition_graph.cc +++ b/src/relay/transforms/partition_graph.cc @@ -130,7 +130,7 @@ class Partitioner : public MixedModeMutator { return post; } else if (call->op == CompilerBeginOp()) { // The annotation node is inserted on edge so it must have only one argument. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); // Traverse the rest graph. 
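// Several hunks in this partitioning pass assert that begin/end annotations
// take exactly one argument: they mark edges, so traversal just looks through
// that single argument. A miniature of the peeling loop; Kind, Call, and
// PeelAnnotations are illustrative names, not TVM's IR.
#include <cassert>
#include <memory>
#include <vector>

enum class Kind { kBegin, kEnd, kOp };
struct Call {
  Kind kind;
  std::vector<std::shared_ptr<Call>> args;
};

std::shared_ptr<Call> PeelAnnotations(std::shared_ptr<Call> e) {
  while (e && (e->kind == Kind::kBegin || e->kind == Kind::kEnd)) {
    assert(e->args.size() == 1);  // annotations sit on edges: exactly one arg
    e = e->args[0];
  }
  return e;
}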
Expr parent = call->args[0]; @@ -147,7 +147,7 @@ class Partitioner : public MixedModeMutator { AnnotatedRegion sg = GetRegion(GetRef(call)); int index = GetArgIdx(sg, GetRef(call)); - CHECK_NE(index, -1); + ICHECK_NE(index, -1); if (region_func_meta_[sg].region_func_in.count(parent)) { return region_func_meta_[sg].region_func_in[parent]; @@ -169,10 +169,10 @@ class Partitioner : public MixedModeMutator { return std::move(var); } } else { - CHECK_EQ(call->op, CompilerEndOp()); + ICHECK_EQ(call->op, CompilerEndOp()); // The annotation node is inserted on edge so it must have only one // argument. - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); AnnotatedRegion region = GetRegion(GetRef(call)); @@ -182,7 +182,7 @@ class Partitioner : public MixedModeMutator { // Traverse subgraph inputs. auto input = Downcast(post)->args[0]; - CHECK(region.defined()) << "Region not defined for " << GetRef(call); + ICHECK(region.defined()) << "Region not defined for " << GetRef(call); // functions are created for each annotated regions, // when their first output is encountered. // If multiple outputs are there, a tuple node is inserted at the end. @@ -194,7 +194,7 @@ class Partitioner : public MixedModeMutator { // Retrieve this particular output of function. Expr region_out_expr = Downcast(GetRef(call))->args[0]; - CHECK(region_func_meta_[region].region_func_out.count(region_out_expr)); + ICHECK(region_func_meta_[region].region_func_out.count(region_out_expr)); return region_func_meta_[region].region_func_out[region_out_expr]; } } @@ -325,7 +325,7 @@ class Partitioner : public MixedModeMutator { global_region_func = WithAttr(std::move(global_region_func), attr::kInline, tvm::Integer(1)); std::string fname = name; - CHECK(!module_->ContainGlobalVar(fname)) << "Global function " << fname << " already exists"; + ICHECK(!module_->ContainGlobalVar(fname)) << "Global function " << fname << " already exists"; // Create a global function and add it to the IRModule for the region. 
// This way we lift the functions that should be handled by external // codegen to the module scope and rely on the pass manager to prevent @@ -444,7 +444,7 @@ IRModule FlattenTupleOutputs(IRModule module) { if (call->op == CompilerEndOp()) { std::string target = call->attrs.as()->compiler; // Arguments of annotation ops should be 1 - CHECK_EQ(call->args.size(), 1U); + ICHECK_EQ(call->args.size(), 1U); auto annotated_op = Downcast(post)->args[0]; if (const auto* tn = annotated_op.as()) { Array new_fields; diff --git a/src/relay/transforms/pattern_utils.h b/src/relay/transforms/pattern_utils.h index 82ffd8a17c1b..555391a27e4b 100644 --- a/src/relay/transforms/pattern_utils.h +++ b/src/relay/transforms/pattern_utils.h @@ -163,7 +163,7 @@ inline Expr ExpandBiasToMatchAxis(Expr bias, int target_ndim, const Arrayvalue - axes[i - 1]->value; - CHECK_GE(diff, 0L); + ICHECK_GE(diff, 0L); if (diff > 0) { auto attrs = make_object(); attrs->axis = i; @@ -199,7 +199,7 @@ inline int64_t GetConv2DSuperChannelsDim(const CallNode* call) { auto param = call->attrs.as(); auto tweight = call->args[1]->type_as(); auto index = param->kernel_layout.operator std::string().find('O'); - CHECK_NE(index, std::string::npos); + ICHECK_NE(index, std::string::npos); auto channels = tir::as_const_int(tweight->shape[index]); return *channels; } @@ -331,8 +331,8 @@ static inline Constant CheckConstantShape(const Array& shape) { auto* shape_data = static_cast(shape_array->data); for (size_t i = 0; i < shape.size(); ++i) { const auto& dim_val = shape[i].as(); - CHECK(dim_val) << "Do not support symbolic shape for " - "Array format. Pass shape as Expr instead."; + ICHECK(dim_val) << "Do not support symbolic shape for " + "Array format. Pass shape as Expr instead."; shape_data[i] = dim_val->value; } return Constant(shape_array); @@ -350,8 +350,8 @@ static inline Array CheckConstantShapeArrayInteger(const Array(); - CHECK(dim_val) << "Do not support symbolic shape for " - "Array format. Pass shape as Expr instead."; + ICHECK(dim_val) << "Do not support symbolic shape for " + "Array format. 
Pass shape as Expr instead."; constShape.push_back(dim_val->value); } @@ -423,7 +423,7 @@ static inline long double ToScalar(const runtime::NDArray& array, size_t i = 0) */ static inline Array ToVector(const runtime::NDArray& array) { size_t ndim = array.Shape().size(); - CHECK_EQ(ndim, 1) << "This function should only be used for 1D NDArrays"; + ICHECK_EQ(ndim, 1) << "This function should only be used for 1D NDArrays"; size_t len = array.Shape().front(); Array out; for (size_t i = 0; i < len; ++i) { @@ -440,7 +440,7 @@ static inline Array ToVector(const runtime::NDArray& array) { */ static inline Array> ToMatrix(const runtime::NDArray& array) { size_t ndim = array.Shape().size(); - CHECK_EQ(ndim, 2) << "This function should only be used for 2D NDArrays"; + ICHECK_EQ(ndim, 2) << "This function should only be used for 2D NDArrays"; size_t dim1 = array.Shape().at(0); size_t dim2 = array.Shape().at(1); @@ -494,8 +494,8 @@ inline Expr Log(Expr e) { template T GetScalarFromConstant(Expr expr) { const auto* n = expr.as(); - CHECK(n) << "Expr must be a constant expr - " << AsText(expr, false); - CHECK(n->is_scalar()); + ICHECK(n) << "Expr must be a constant expr - " << AsText(expr, false); + ICHECK(n->is_scalar()); return static_cast(n->data->data)[0]; } diff --git a/src/relay/transforms/simplify_fc_transpose.cc b/src/relay/transforms/simplify_fc_transpose.cc index 99ded0ba591d..b5090e7e6fe4 100644 --- a/src/relay/transforms/simplify_fc_transpose.cc +++ b/src/relay/transforms/simplify_fc_transpose.cc @@ -81,7 +81,7 @@ class FCTransposeMutator : public ExprRewriter { explicit FCTransposeMutator(const Array& target_weights) : dense_op_(Op::Get("nn.dense")), transpose_op_(Op::Get("transpose")) { for (size_t i = 0; i < target_weights.size(); ++i) { - CHECK(target_weights[i]->IsInstance()); + ICHECK(target_weights[i]->IsInstance()); std::string k = target_weights[i].as()->data; target_weights_.emplace(k); } @@ -96,7 +96,7 @@ class FCTransposeMutator : public ExprRewriter { const auto arg = weight->args[0]; if (arg.as()) { const auto& arg_node = arg.as(); - CHECK_GT(target_weights_.count(arg_node->name_hint()), 0); + ICHECK_GT(target_weights_.count(arg_node->name_hint()), 0); const auto& tt = arg_node->type_annotation.as(); auto wt_type = TensorType({tt->shape[1], tt->shape[0]}, tt->dtype); Var wt(arg_node->name_hint() + ".T", wt_type); diff --git a/src/relay/transforms/simplify_inference.cc b/src/relay/transforms/simplify_inference.cc index 7df71967d834..7e587664b4dc 100644 --- a/src/relay/transforms/simplify_inference.cc +++ b/src/relay/transforms/simplify_inference.cc @@ -34,7 +34,7 @@ namespace relay { Expr BatchNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Expr moving_mean, Expr moving_var, Type tdata) { auto ttype = tdata.as(); - CHECK(ttype); + ICHECK(ttype); const auto param = attrs.as(); Expr epsilon = MakeConstantScalar(ttype->dtype, static_cast(param->epsilon)); Expr var_add_eps = Add(moving_var, epsilon); @@ -62,9 +62,9 @@ Expr BatchNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Expr GroupNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Type tdata) { auto ttype = tdata.as(); - CHECK(ttype); + ICHECK(ttype); const auto param = attrs.as(); - CHECK(param); + ICHECK(param); int ndim = ttype->shape.size(); int axis = (param->axis < 0) ?
param->axis + ndim : param->axis; @@ -117,9 +117,9 @@ Expr GroupNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Expr LayerNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Type tdata) { auto ttype = tdata.as(); - CHECK(ttype); + ICHECK(ttype); const auto param = attrs.as(); - CHECK(param); + ICHECK(param); Expr epsilon = MakeConstantScalar(ttype->dtype, static_cast(param->epsilon)); Expr mean = Mean(data, {param->axis}, true, false); @@ -140,9 +140,9 @@ Expr LayerNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Expr InstanceNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr beta, Type tdata) { auto ttype = tdata.as(); - CHECK(ttype); + ICHECK(ttype); const auto param = attrs.as(); - CHECK(param); + ICHECK(param); int ndim = ttype->shape.size(); int axis = (param->axis < 0) ? param->axis + ndim : param->axis; @@ -168,7 +168,7 @@ Expr InstanceNormToInferUnpack(const Attrs attrs, Expr data, Expr gamma, Expr be Expr L2NormToInferUnpack(const Attrs attrs, Expr data) { const auto param = attrs.as(); - CHECK(param); + ICHECK(param); Expr epsilon = MakeConstantScalar(DataType::Float(32), static_cast(param->eps)); diff --git a/src/relay/transforms/to_a_normal_form.cc b/src/relay/transforms/to_a_normal_form.cc index e5d7b133c0c0..05844477cc5b 100644 --- a/src/relay/transforms/to_a_normal_form.cc +++ b/src/relay/transforms/to_a_normal_form.cc @@ -65,7 +65,7 @@ std::pair CalcScope(const DependencyGraph& dg) { auto iit = n->parents.head; Scope s; if (iit == nullptr) { - CHECK(!global_scope_used); + ICHECK(!global_scope_used); s = global_scope; global_scope_used = true; } else { @@ -90,7 +90,7 @@ std::pair CalcScope(const DependencyGraph& dg) { expr_scope.insert({n, s}); } } - CHECK(global_scope_used); + ICHECK(global_scope_used); return std::make_pair(expr_scope, lifted_exprs); } @@ -114,11 +114,11 @@ Scope Fill::GetSubScope(const Expr& e, size_t i) { DependencyGraph::Node* n = dg_.expr_node.at(e); auto h = n->children.head; while (i != 0) { - CHECK(h); + ICHECK(h); --i; h = h->next; } - CHECK(h); + ICHECK(h); return node_scope_->at(h->value); } @@ -130,7 +130,7 @@ Expr Fill::VisitExpr(const Expr& e, const Var& v) { } auto ret = memo.at(e); // if no include_set is specified, every expression should be atomic. 
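Atomic here means a variable, constant, or similarly trivial node; A-normal form guarantees this by let-binding every compound sub-expression, so each one has an explicit binding site for the scope calculation above. An illustrative before/after, written as comments over hypothetical Relay fragments:

    // Before ANF: the inner call nests directly inside its consumer.
    //   add(mul(%x, %y), %z)
    // After ANF: the compound sub-expression gets its own let binding.
    //   let %v0 = mul(%x, %y); add(%v0, %z)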
- if (include_set_ == nullptr) CHECK(IsAtomic(ret)); + if (include_set_ == nullptr) ICHECK(IsAtomic(ret)); return ret; } @@ -258,12 +258,12 @@ IRModule ToANormalForm(const IRModule& m) { tvm::Map updates; auto funcs = m->functions; for (const auto& it : funcs) { - CHECK_EQ(FreeVars(it.second).size(), 0); + ICHECK_EQ(FreeVars(it.second).size(), 0); if (const auto* n = it.second.as()) { if (n->GetAttr(attr::kCompiler).defined()) continue; } Expr ret = TransformF([&](const Expr& e) { return transform::ToANormalForm(e); }, it.second); - CHECK_EQ(FreeVars(ret).size(), 0) + ICHECK_EQ(FreeVars(ret).size(), 0) << AsText(ret) << "should not have free vars: " << FreeVars(ret); updates.Set(it.first, Downcast(ret)); } diff --git a/src/relay/transforms/to_basic_block_normal_form.cc b/src/relay/transforms/to_basic_block_normal_form.cc index fcec4e80ce5b..1aab367cf22a 100644 --- a/src/relay/transforms/to_basic_block_normal_form.cc +++ b/src/relay/transforms/to_basic_block_normal_form.cc @@ -54,7 +54,7 @@ IRModule ToBasicBlockNormalForm(const IRModule& mod) { tvm::Map updates; auto funcs = mod->functions; for (const auto& it : funcs) { - CHECK_EQ(FreeVars(it.second).size(), 0) << "Expected no free variables"; + ICHECK_EQ(FreeVars(it.second).size(), 0) << "Expected no free variables"; if (const auto* n = it.second.as()) { if (n->GetAttr(attr::kCompiler).defined()) continue; } diff --git a/src/relay/transforms/to_cps.cc b/src/relay/transforms/to_cps.cc index 5ece50133172..b7f9cafbc7dc 100644 --- a/src/relay/transforms/to_cps.cc +++ b/src/relay/transforms/to_cps.cc @@ -134,7 +134,7 @@ Function ToCPS(const Function& f, const IRModule& m, CPSMap* cm, VarMap* vm, } Expr VisitExpr_(const FunctionNode* op, const MCont& k) final { - CHECK(!op->HasNonzeroAttr(attr::kPrimitive)) << "primitive func not supported yet."; + ICHECK(!op->HasNonzeroAttr(attr::kPrimitive)) << "primitive func not supported yet."; return k(ToCPS(GetRef(op), m, cm, vm, answer)); } @@ -309,14 +309,14 @@ Function ToCPS(const Function& f, const IRModule& m) { Function UnCPS(const Function& f) { CheckFeature(f, FeatureSet::All() - fGraph); - CHECK_GT(f->params.size(), 0); + ICHECK_GT(f->params.size(), 0); std::vector new_params; for (const auto& p : f->params) { new_params.push_back(Var(p->name_hint(), p->checked_type())); } auto cont_type = Downcast(new_params.back()->type_annotation); new_params.pop_back(); - CHECK_EQ(cont_type->arg_types.size(), 1); + ICHECK_EQ(cont_type->arg_types.size(), 1); auto new_ret_type = Type(cont_type->arg_types[0]); std::vector new_type_params; for (const auto& tp : f->type_params) { @@ -325,7 +325,7 @@ Function UnCPS(const Function& f) { auto answer_type = new_type_params.back(); new_type_params.pop_back(); // TODO(@M.K.): make alphaequal work on free term - // CHECK(tvm::StructuralEqual()(cont_type, Arrow(new_ret_type, answer_type))); + // ICHECK(tvm::StructuralEqual()(cont_type, Arrow(new_ret_type, answer_type))); auto x = Var("x", new_ret_type); auto cont = Function({x}, x, new_ret_type, {}, {}); tvm::Array args; diff --git a/src/relay/transforms/transform_layout.h b/src/relay/transforms/transform_layout.h index c250d3801b68..35fb176c6bca 100644 --- a/src/relay/transforms/transform_layout.h +++ b/src/relay/transforms/transform_layout.h @@ -138,9 +138,9 @@ class TransformMemorizer : public ObjectRef { } // 2) Insert layout transform on the transformed src.
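The two checks that follow gate the rewrite on the layouts being mutually convertible. For reference, tir::BijectiveLayout can be exercised on its own; a minimal sketch (assuming tvm/tir/data_layout.h):

    #include <tvm/tir/data_layout.h>

    // NCHW <-> NCHW4c is bijective: the trailing 4c factors the C axis,
    // so a layout_transform between the two layouts is well-defined.
    tvm::tir::BijectiveLayout bij(tvm::tir::Layout("NCHW"), tvm::tir::Layout("NCHW4c"));
    ICHECK(bij.defined());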
- CHECK(new_src_layout.defined() && dst_layout.defined()) + ICHECK(new_src_layout.defined() && dst_layout.defined()) << "Cannot insert layout transform because there are undefined layouts"; - CHECK(tir::BijectiveLayout(new_src_layout, dst_layout).defined()) + ICHECK(tir::BijectiveLayout(new_src_layout, dst_layout).defined()) << "Cannot insert layout transform because there are inconvertible layouts: " << new_src_layout << " v.s. " << dst_layout; return MakeLayoutTransform(input_expr, new_src_layout.name(), dst_layout.name()); @@ -299,7 +299,7 @@ Expr LayoutRewriter(const Call& ref_call, const Array& new_args, const Obj if (!success) { return Expr(nullptr); } - CHECK_EQ(old_in.size(), new_in.size()); + ICHECK_EQ(old_in.size(), new_in.size()); // if new_in == 'undef': new_in = old_in for (size_t i = 0; i < new_in.size(); ++i) { @@ -322,9 +322,9 @@ Expr LayoutRewriter(const Call& ref_call, const Array& new_args, const Obj return Expr(nullptr); } - CHECK_EQ(new_out.size(), old_out.size()) + ICHECK_EQ(new_out.size(), old_out.size()) << "The number of output nodes should keep the same during alter_op_layout"; - CHECK_EQ(new_in.size(), new_in2.size()) + ICHECK_EQ(new_in.size(), new_in2.size()) << "The number of input nodes should keep the same during alter_op_layout"; // if (new_in != new_in2): insert transform (new_in -> new_in2) @@ -344,7 +344,7 @@ Expr LayoutRewriter(const Call& ref_call, const Array& new_args, const Obj pt++; } } - CHECK_EQ(pt, inputs.size()); + ICHECK_EQ(pt, inputs.size()); // state[node] = (old_out, new_out) // (handle tuple output) @@ -362,7 +362,7 @@ Expr LayoutRewriter(const Call& ref_call, const Array& new_args, const Obj return Tuple(fields); } else { auto rnode = make_object>(); - CHECK_EQ(new_out.size(), 1); + ICHECK_EQ(new_out.size(), 1); rnode->value = Call(new_call->op, transformed_args, new_call->attrs); rnode->old_layout = old_out[0]; rnode->new_layout = new_out[0]; diff --git a/src/relay/transforms/type_infer.cc b/src/relay/transforms/type_infer.cc index 105aed3614cd..cb3ba0030a5b 100644 --- a/src/relay/transforms/type_infer.cc +++ b/src/relay/transforms/type_infer.cc @@ -60,15 +60,15 @@ struct TupleGetItemAttrs : public tvm::AttrsNode { bool TupleGetItemRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + ICHECK_EQ(types.size(), 2); if (types[0].as()) return false; const auto* data = types[0].as(); - CHECK(data != nullptr) << "TupleGetItem expect input type to be TupleType " - << " get " << types[0] << " instead"; + ICHECK(data != nullptr) << "TupleGetItem expect input type to be TupleType " + << " get " << types[0] << " instead"; const auto* param = attrs.as(); - CHECK(param != nullptr); - CHECK_GE(param->index, 0); - CHECK_LT(param->index, data->fields.size()); + ICHECK(param != nullptr); + ICHECK_GE(param->index, 0); + ICHECK_LT(param->index, data->fields.size()); reporter->Assign(types[1], data->fields[param->index]); return true; } @@ -149,7 +149,7 @@ class TypeInferencer : private ExprFunctor, return it->second.checked_type; } Type ret = this->VisitExpr(expr); - CHECK(ret.defined()); + ICHECK(ret.defined()); KindCheck(ret, mod_, this->diag_ctx); ResolvedTypeInfo& rti = type_map_[expr]; rti.checked_type = ret; @@ -202,8 +202,8 @@ class TypeInferencer : private ExprFunctor, } void VisitPattern_(const PatternConstructorNode* con, const Type& t) { - CHECK(mod_.defined()) << "Cannot do type inference without a environment:" - << con->constructor->name_hint; + ICHECK(mod_.defined()) << 
"Cannot do type inference without a environment:" + << con->constructor->name_hint; TypeData td = mod_->type_definitions.at(con->constructor->belong_to); auto pc = GetRef(con); @@ -264,7 +264,7 @@ class TypeInferencer : private ExprFunctor, if (!tt) { this->EmitFatal(Diagnostic::Error(pt->span) << "Expected a tuple type, got " << unified); } - CHECK(tup->patterns.size() == tt->fields.size()) << "not enough pattern"; + ICHECK(tup->patterns.size() == tt->fields.size()) << "not enough pattern"; for (size_t i = 0; i < tup->patterns.size(); ++i) { VisitPattern(tup->patterns[i], tt->fields[i]); } @@ -325,7 +325,7 @@ class TypeInferencer : private ExprFunctor, Type vtype = GetType(let->value); let_type = Unify(let_type, vtype, let->span); - CHECK(is_functional_literal || !type_map_.count(let->var)); + ICHECK(is_functional_literal || !type_map_.count(let->var)); // NOTE: no scoping is necessary because var are unique in program type_map_[let->var].checked_type = let_type; return GetType(let->body); @@ -368,7 +368,7 @@ class TypeInferencer : private ExprFunctor, // Build a subsitituion map up from the function type and type arguments. // Eventually allow the type vars to be passed in. - CHECK(fn_ty->type_params.size() == ty_args.size()) + ICHECK(fn_ty->type_params.size() == ty_args.size()) << "number of type parameters does not match expected"; for (size_t i = 0; i < ty_args.size(); ++i) { subst_map.Set(fn_ty->type_params[i], ty_args[i]); @@ -408,7 +408,7 @@ class TypeInferencer : private ExprFunctor, if (type_info == type_map_.end()) { type_map_.insert({expr, ResolvedTypeInfo(Type(), type_args)}); } else { - CHECK(!type_info->second.type_args.defined()); + ICHECK(!type_info->second.type_args.defined()); type_info->second.type_args = type_args; } } @@ -511,7 +511,7 @@ class TypeInferencer : private ExprFunctor, if (f->ret_type.defined()) { rtype = this->Unify(f->ret_type, rtype, GetRef(f)->span); } - CHECK(rtype.defined()); + ICHECK(rtype.defined()); auto ret = FuncType(arg_types, rtype, f->type_params, {}); return solver_.Resolve(ret); } @@ -532,7 +532,7 @@ class TypeInferencer : private ExprFunctor, } Type VisitExpr_(const ConstructorNode* c) final { - CHECK(mod_.defined()) << "Cannot do type inference without a environment:" << c->name_hint; + ICHECK(mod_.defined()) << "Cannot do type inference without a environment:" << c->name_hint; TypeData td = mod_->LookupTypeDef(c->belong_to); std::vector types; for (const auto& t : td->type_vars) { @@ -595,7 +595,7 @@ class TypeInferencer::Resolver : public ExprMutator, PatternMutator { template Expr AttachCheckedType(const T* op) { auto it = tmap_.find(GetRef(op)); - CHECK(it != tmap_.end()); + ICHECK(it != tmap_.end()); Type checked_type = solver_->Resolve(it->second.checked_type); if (checked_type.as() != nullptr) { @@ -664,7 +664,7 @@ class TypeInferencer::Resolver : public ExprMutator, PatternMutator { } if (need_update_fn) { auto* fn_type = checked_type.as(); - CHECK(fn_type != nullptr); + ICHECK(fn_type != nullptr); new_fn->ret_type = fn_type->ret_type; } return new_e; @@ -713,7 +713,7 @@ struct AllCheckTypePopulated : ExprVisitor { if (e.as()) { return; } - CHECK(e->checked_type_.defined()) << "Expression: " << e; + ICHECK(e->checked_type_.defined()) << "Expression: " << e; return ExprVisitor::VisitExpr(e); } }; @@ -788,7 +788,7 @@ Pass InferType() { } auto free_tvars = FreeTypeVars(updated_func, mod); - CHECK(free_tvars.size() == 0) + ICHECK(free_tvars.size() == 0) << "Found unbound type variables in " << updated_func << ": " << free_tvars; 
EnsureCheckedType(updated_func); updates.push_back({it.first, Downcast(updated_func)}); diff --git a/src/runtime/c_runtime_api.cc b/src/runtime/c_runtime_api.cc index 9895ff6987ad..299f2826f7d7 100644 --- a/src/runtime/c_runtime_api.cc +++ b/src/runtime/c_runtime_api.cc @@ -44,47 +44,47 @@ namespace runtime { std::string GetCustomTypeName(uint8_t type_code) { auto f = tvm::runtime::Registry::Get("runtime._datatype_get_type_name"); - CHECK(f) << "Function runtime._datatype_get_type_name not found"; + ICHECK(f) << "Function runtime._datatype_get_type_name not found"; return (*f)(type_code).operator std::string(); } uint8_t GetCustomTypeCode(const std::string& type_name) { auto f = tvm::runtime::Registry::Get("runtime._datatype_get_type_code"); - CHECK(f) << "Function runtime._datatype_get_type_code not found"; + ICHECK(f) << "Function runtime._datatype_get_type_code not found"; return (*f)(type_name).operator int(); } bool GetCustomTypeRegistered(uint8_t type_code) { auto f = tvm::runtime::Registry::Get("runtime._datatype_get_type_registered"); - CHECK(f) << "Function runtime._datatype_get_type_registered not found"; + ICHECK(f) << "Function runtime._datatype_get_type_registered not found"; return (*f)(type_code).operator bool(); } uint8_t ParseCustomDatatype(const std::string& s, const char** scan) { - CHECK(s.substr(0, 6) == "custom") << "Not a valid custom datatype string"; + ICHECK(s.substr(0, 6) == "custom") << "Not a valid custom datatype string"; auto tmp = s.c_str(); - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); *scan = s.c_str() + 6; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); if (**scan != '[') LOG(FATAL) << "expected opening brace after 'custom' type in " << s; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); *scan += 1; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); size_t custom_name_len = 0; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); while (*scan + custom_name_len <= s.c_str() + s.length() && *(*scan + custom_name_len) != ']') ++custom_name_len; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); if (*(*scan + custom_name_len) != ']') LOG(FATAL) << "expected closing brace after 'custom' type in " << s; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); *scan += custom_name_len + 1; - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); auto type_name = s.substr(7, custom_name_len); - CHECK(s.c_str() == tmp); + ICHECK(s.c_str() == tmp); return GetCustomTypeCode(type_name); } @@ -128,7 +128,7 @@ class DeviceAPIManager { std::string factory = "device_api." + name; auto* f = Registry::Get(factory); if (f == nullptr) { - CHECK(allow_missing) << "Device API " << name << " is not enabled."; + ICHECK(allow_missing) << "Device API " << name << " is not enabled."; return nullptr; } void* ptr = (*f)(); @@ -176,7 +176,7 @@ void DeviceAPI::SyncStreamFromTo(TVMContext ctx, TVMStreamHandle event_src, /*! * \brief Normalize error message * - * Parse the header generated by LOG(FATAL) and CHECK + * Parse the header generated by LOG(FATAL) and ICHECK * and reformat the message into the standard format.
* * This function will also merge all the stack traces into @@ -451,7 +451,7 @@ int TVMFuncCall(TVMFunctionHandle func, TVMValue* args, int* arg_type_codes, int int TVMCFuncSetReturn(TVMRetValueHandle ret, TVMValue* value, int* type_code, int num_ret) { API_BEGIN(); - CHECK_EQ(num_ret, 1); + ICHECK_EQ(num_ret, 1); TVMRetValue* rv = static_cast(ret); *rv = TVMArgValue(value[0], type_code[0]); API_END(); diff --git a/src/runtime/container.cc b/src/runtime/container.cc index 253243271d93..916a912b3c5e 100644 --- a/src/runtime/container.cc +++ b/src/runtime/container.cc @@ -45,7 +45,7 @@ TVM_REGISTER_GLOBAL("runtime.GetADTFields").set_body([](TVMArgs args, TVMRetValu ObjectRef obj = args[0]; int idx = args[1]; const auto& adt = Downcast(obj); - CHECK_LT(idx, adt.size()); + ICHECK_LT(idx, adt.size()); *rv = adt[idx]; }); diff --git a/src/runtime/contrib/arm_compute_lib/acl_allocator.cc b/src/runtime/contrib/arm_compute_lib/acl_allocator.cc index 2feb5b03c88b..f9a67010e6e2 100644 --- a/src/runtime/contrib/arm_compute_lib/acl_allocator.cc +++ b/src/runtime/contrib/arm_compute_lib/acl_allocator.cc @@ -29,7 +29,7 @@ namespace runtime { namespace contrib { void* ACLAllocator::allocate(size_t size, size_t alignment) { - CHECK_GT(size, 0) << "Cannot allocate size less than or equal to zero"; + ICHECK_GT(size, 0) << "Cannot allocate size less than or equal to zero"; return this->device_api_->AllocWorkspace(this->ctx_, size, {}); } diff --git a/src/runtime/contrib/arm_compute_lib/acl_runtime.cc b/src/runtime/contrib/arm_compute_lib/acl_runtime.cc index e5f2c2d47281..09879bdc6e95 100644 --- a/src/runtime/contrib/arm_compute_lib/acl_runtime.cc +++ b/src/runtime/contrib/arm_compute_lib/acl_runtime.cc @@ -75,7 +75,7 @@ class ACLRuntime : public JSONRuntimeBase { * \param consts The constant params from compiled model. 
*/ void Init(const Array& consts) override { - CHECK_EQ(consts.size(), const_idx_.size()) + ICHECK_EQ(consts.size(), const_idx_.size()) << "The number of input constants must match the number of required."; SetupConstants(consts); BuildEngine(); @@ -222,7 +222,7 @@ class ACLRuntime : public JSONRuntimeBase { arm_compute::PadStrideInfo pad_stride_info = MakeACLPadStride(padding, strides); int groups = std::stoi(node.GetAttr>("groups")[0]); - CHECK(groups == 1) << "Arm Compute Library NEON convolution only supports group size of 1."; + ICHECK(groups == 1) << "Arm Compute Library NEON convolution only supports group size of 1."; arm_compute::ActivationLayerInfo act_info; if (node.HasAttr("activation_type")) { @@ -242,7 +242,7 @@ class ACLRuntime : public JSONRuntimeBase { size_t num_inputs = inputs.size(); bool has_bias; if (node.GetOpName() == "qnn.conv2d") { - CHECK(num_inputs >= 8U && num_inputs <= 9U) + ICHECK(num_inputs >= 8U && num_inputs <= 9U) << "Quantized convolution requires 9 inputs with a bias, 8 inputs without."; has_bias = num_inputs == 9; layer->inputs.push_back(MakeACLTensorFromJSONEntry(inputs[0], &inputs[4], &inputs[2])); @@ -253,7 +253,7 @@ class ACLRuntime : public JSONRuntimeBase { layer->outputs.push_back( MakeACLTensorFromJSONNode(node, &inputs[6 + has_bias], &inputs[7 + has_bias])); } else { - CHECK(num_inputs >= 2U && num_inputs <= 3U) + ICHECK(num_inputs >= 2U && num_inputs <= 3U) << "Convolution requires 3 inputs with a bias, 2 inputs without."; has_bias = num_inputs == 3; for (const auto& i : inputs) { @@ -286,7 +286,7 @@ class ACLRuntime : public JSONRuntimeBase { size_t num_inputs = inputs.size(); bool has_bias; if (node.GetOpName() == "qnn.dense") { - CHECK(num_inputs >= 8U && num_inputs <= 9U) + ICHECK(num_inputs >= 8U && num_inputs <= 9U) << "Quantized fully connected (dense) layer requires 9 inputs with a bias, 8 inputs " "without."; has_bias = num_inputs == 9; @@ -298,7 +298,7 @@ class ACLRuntime : public JSONRuntimeBase { layer->outputs.push_back( MakeACLTensorFromJSONNode(node, &inputs[6 + has_bias], &inputs[7 + has_bias])); } else { - CHECK(num_inputs >= 2U && num_inputs <= 3U) + ICHECK(num_inputs >= 2U && num_inputs <= 3U) << "Fully connected (dense) layer requires 3 inputs with a bias, 2 inputs without."; has_bias = num_inputs == 3; for (const auto& i : inputs) { diff --git a/src/runtime/contrib/arm_compute_lib/acl_utils.cc b/src/runtime/contrib/arm_compute_lib/acl_utils.cc index 59c941df5195..0b6d27623a1a 100644 --- a/src/runtime/contrib/arm_compute_lib/acl_utils.cc +++ b/src/runtime/contrib/arm_compute_lib/acl_utils.cc @@ -35,7 +35,8 @@ namespace contrib { using JSONGraphNode = tvm::runtime::json::JSONGraphNode; void CheckACLError(const arm_compute::Status& status) { - CHECK(status.error_code() == arm_compute::ErrorCode::OK) << "ACL: " << status.error_description(); + ICHECK(status.error_code() == arm_compute::ErrorCode::OK) + << "ACL: " << status.error_description(); } arm_compute::Tensor MakeACLTensor(const JSONGraphNode& tensor_rep, void* data, @@ -65,7 +66,7 @@ arm_compute::TensorInfo MakeACLTensorInfo(const std::vector& shape, if (scale != nullptr && offset != nullptr) { std::vector scale_data = GetVectorFromDLTensor(scale); std::vector offset_data = GetVectorFromDLTensor(offset); - CHECK(scale_data.size() == 1 && offset_data.size() == 1) + ICHECK(scale_data.size() == 1 && offset_data.size() == 1) << "Currently only per-layer quantization is supported in the Arm Compute Library runtime."; arm_compute::QuantizationInfo qinfo(scale_data[0], 
offset_data[0]); info.set_quantization_info(qinfo); @@ -134,7 +135,7 @@ arm_compute::DataType MakeACLDataType(const DLDataType& data_type) { template std::vector GetVectorFromDLTensor(const DLTensor* tensor) { - CHECK(tensor) << "Cannot convert a nullptr"; + ICHECK(tensor) << "Cannot convert a nullptr"; int len = 1; for (int i = 0; i < tensor->ndim; i++) { len *= tensor->shape[i]; diff --git a/src/runtime/contrib/cblas/cblas.cc b/src/runtime/contrib/cblas/cblas.cc index 80d39f6efa9c..16496e06aae3 100644 --- a/src/runtime/contrib/cblas/cblas.cc +++ b/src/runtime/contrib/cblas/cblas.cc @@ -20,9 +20,9 @@ /*! * \file Use external cblas library call. */ -#include #include #include +#include extern "C" { #include @@ -125,7 +125,7 @@ struct CblasDgemmBatchIterativeOp { // matrix multiplication for row major TVM_REGISTER_GLOBAL("tvm.contrib.cblas.matmul").set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) CallGemm(args, ret, CblasSgemmOp()); @@ -135,7 +135,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cblas.matmul").set_body([](TVMArgs args, TVMRet TVM_REGISTER_GLOBAL("tvm.contrib.cblas.batch_matmul").set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) { CallBatchGemm(args, ret, CblasSgemmBatchOp()); } else { @@ -146,7 +146,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cblas.batch_matmul").set_body([](TVMArgs args, TVM_REGISTER_GLOBAL("tvm.contrib.cblas.batch_matmul_iterative") .set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) { CallBatchGemm(args, ret, CblasSgemmBatchIterativeOp()); } else { diff --git a/src/runtime/contrib/cblas/gemm_common.h b/src/runtime/contrib/cblas/gemm_common.h index d92f9d710a44..6c31fbdd06a3 100644 --- a/src/runtime/contrib/cblas/gemm_common.h +++ b/src/runtime/contrib/cblas/gemm_common.h @@ -71,23 +71,23 @@ inline void CallGemm(TVMArgs args, TVMRetValue* ret, TGemmOp op) { bool transa = args[3]; bool transb = args[4]; int bit_depth = sizeof(typename TGemmOp::TDatatype) * 8; - CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); // C can never be transposed. - CHECK(!IsInPlaceTransposed(C)); + ICHECK(!IsInPlaceTransposed(C)); // Reversed strides indicates an in-place transpose operation. transa = IsInPlaceTransposed(A) ? !transa : transa; transb = IsInPlaceTransposed(B) ? !transb : transb; - CHECK(TypeMatch(B->dtype, kDLFloat, bit_depth)); - CHECK(TypeMatch(C->dtype, kDLFloat, bit_depth)); + ICHECK(TypeMatch(B->dtype, kDLFloat, bit_depth)); + ICHECK(TypeMatch(C->dtype, kDLFloat, bit_depth)); double alpha = args.size() > 5 ? args[5] : 1.0; double beta = args.size() > 6 ? 
args[6] : 0.0; op(transb, transa, ColumnCount(B, transb), RowCount(A, transa), ColumnCount(A, transa), @@ -118,24 +118,24 @@ inline void CallU8S8S32Gemm(TVMArgs args, TVMRetValue* ret, TGemmOp op) { int offset_c[1]; offset_c[0] = 0; - CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); // C can never be transposed. - CHECK(!IsInPlaceTransposed(C)); + ICHECK(!IsInPlaceTransposed(C)); // Reversed strides indicates an in-place transpose operation. transa = IsInPlaceTransposed(A) ? !transa : transa; transb = IsInPlaceTransposed(B) ? !transb : transb; - CHECK(TypeMatch(A->dtype, kDLUInt, 8)); - CHECK(TypeMatch(B->dtype, kDLInt, 8)); - CHECK(TypeMatch(C->dtype, kDLInt, 32)); + ICHECK(TypeMatch(A->dtype, kDLUInt, 8)); + ICHECK(TypeMatch(B->dtype, kDLInt, 8)); + ICHECK(TypeMatch(C->dtype, kDLInt, 32)); double alpha = args.size() > 5 ? args[5] : 1.0; double beta = args.size() > 6 ? args[6] : 0.0; op(transb, transa, ColumnCount(B, transb), RowCount(A, transa), ColumnCount(A, transa), @@ -180,22 +180,22 @@ inline void CallBatchGemm(TVMArgs args, TVMRetValue* ret, TBatchGemmOp op) { bool transa = args[3]; bool transb = args[4]; int bit_depth = sizeof(DType) * 8; - CHECK_EQ(A->ndim, 3); - CHECK_EQ(B->ndim, 3); - CHECK_EQ(C->ndim, 3); + ICHECK_EQ(A->ndim, 3); + ICHECK_EQ(B->ndim, 3); + ICHECK_EQ(C->ndim, 3); int batch_size = BatchCount3D(A); - CHECK_EQ(BatchCount3D(B), batch_size); - CHECK_EQ(BatchCount3D(C), batch_size); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(BatchCount3D(B), batch_size); + ICHECK_EQ(BatchCount3D(C), batch_size); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); // C can never be transposed. - CHECK(!IsInPlaceTransposed3D(C)); + ICHECK(!IsInPlaceTransposed3D(C)); // Reversed strides indicates an in-place transpose operation. transa = IsInPlaceTransposed3D(A) ? !transa : transa; transb = IsInPlaceTransposed3D(B) ? !transb : transb; - CHECK(TypeMatch(B->dtype, kDLFloat, bit_depth)); - CHECK(TypeMatch(C->dtype, kDLFloat, bit_depth)); + ICHECK(TypeMatch(B->dtype, kDLFloat, bit_depth)); + ICHECK(TypeMatch(C->dtype, kDLFloat, bit_depth)); double alpha = args.size() > 5 ? args[5] : 1.0; double beta = args.size() > 6 ? args[6] : 0.0; const int A_size = A->shape[1] * A->shape[2]; diff --git a/src/runtime/contrib/cblas/mkl.cc b/src/runtime/contrib/cblas/mkl.cc index 14e2375a311e..273aa45367dd 100644 --- a/src/runtime/contrib/cblas/mkl.cc +++ b/src/runtime/contrib/cblas/mkl.cc @@ -20,9 +20,9 @@ /*! * \file Use external mkl library call. 
*/ -#include #include #include +#include extern "C" { #include @@ -156,7 +156,7 @@ struct MKLDgemmBatchIterativeOp { // matrix multiplication for row major TVM_REGISTER_GLOBAL("tvm.contrib.mkl.matmul").set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) CallGemm(args, ret, MKLSgemmOp()); @@ -169,15 +169,15 @@ TVM_REGISTER_GLOBAL("tvm.contrib.mkl.matmul_u8s8s32").set_body([](TVMArgs args, DLTensor* A = args[0]; DLTensor* B = args[1]; DLTensor* C = args[2]; - CHECK(TypeMatch(A->dtype, kDLUInt, 8) && TypeMatch(B->dtype, kDLInt, 8) && - TypeMatch(C->dtype, kDLInt, 32)); + ICHECK(TypeMatch(A->dtype, kDLUInt, 8) && TypeMatch(B->dtype, kDLInt, 8) && + TypeMatch(C->dtype, kDLInt, 32)); CallU8S8S32Gemm(args, ret, MKLGemmU8S8S32Op()); }); TVM_REGISTER_GLOBAL("tvm.contrib.mkl.batch_matmul").set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) { CallBatchGemm(args, ret, MKLSgemmBatchOp()); } else { @@ -188,7 +188,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.mkl.batch_matmul").set_body([](TVMArgs args, TV TVM_REGISTER_GLOBAL("tvm.contrib.mkl.batch_matmul_iterative") .set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32) || TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 32)) { CallBatchGemm(args, ret, MKLSgemmBatchIterativeOp()); } else { diff --git a/src/runtime/contrib/cblas/mkldnn.cc b/src/runtime/contrib/cblas/mkldnn.cc index 43c0dba595cc..1c3fa023dcc7 100644 --- a/src/runtime/contrib/cblas/mkldnn.cc +++ b/src/runtime/contrib/cblas/mkldnn.cc @@ -20,9 +20,9 @@ /*! * \file Use external cblas library call. */ -#include #include #include +#include extern "C" { #include @@ -48,7 +48,7 @@ struct MKLDNNSgemmOp { // matrix multiplication for row major TVM_REGISTER_GLOBAL("tvm.contrib.mkldnn.matmul").set_body([](TVMArgs args, TVMRetValue* ret) { DLTensor* A = args[0]; - CHECK(TypeMatch(A->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32)); CallGemm(args, ret, MKLDNNSgemmOp()); }); } // namespace contrib diff --git a/src/runtime/contrib/coreml/coreml_runtime.mm b/src/runtime/contrib/coreml/coreml_runtime.mm index fafc14a6898a..18d4f735a55e 100644 --- a/src/runtime/contrib/coreml/coreml_runtime.mm +++ b/src/runtime/contrib/coreml/coreml_runtime.mm @@ -59,7 +59,7 @@ MLMultiArray* dest = [[MLMultiArray alloc] initWithShape:shape dataType:dataType error:nil]; - CHECK(data_in->strides == NULL); + ICHECK(data_in->strides == NULL); memcpy(dest.dataPointer, data_in->data, size); NSString* nsKey = [NSString stringWithUTF8String:key.c_str()]; @@ -155,7 +155,8 @@ // Copy input tensors to corresponding data entries. 
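These contrib kernels are reached through the packed-function registry rather than direct linkage. A caller-side sketch, where A, B, and C are assumed to be pre-allocated DLTensor pointers:

    #include <tvm/runtime/registry.h>

    // Look up the extern registered as "tvm.contrib.mkl.matmul" above and
    // invoke it as C = A * B with no transposes; Get() returns nullptr when
    // the backing library was not compiled in.
    const tvm::runtime::PackedFunc* f = tvm::runtime::Registry::Get("tvm.contrib.mkl.matmul");
    ICHECK(f != nullptr) << "tvm.contrib.mkl.matmul is not registered";
    (*f)(A, B, C, false, false);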
for (auto i = 0; i < args.size() - 1; ++i) { - CHECK(args[i].type_code() == kTVMDLTensorHandle || args[i].type_code() == kTVMNDArrayHandle) + ICHECK(args[i].type_code() == kTVMDLTensorHandle || + args[i].type_code() == kTVMNDArrayHandle) << "Expect NDArray or DLTensor as inputs\n"; if (args[i].type_code() == kTVMDLTensorHandle) { model_->SetInput([input_names[i] UTF8String], args[i]); @@ -238,7 +239,7 @@ Module CoreMLRuntimeLoadFromBinary(void* strm) { NSString* model_path = [tempDir stringByAppendingPathComponent:dirname]; NSURL* url = [NSURL fileURLWithPath:model_path]; BOOL res = [dirWrapper writeToURL:url options:0 originalContentsURL:nil error:nil]; - CHECK(res) << "Failed to create model directory " << [model_path UTF8String]; + ICHECK(res) << "Failed to create model directory " << [model_path UTF8String]; auto exec = make_object(); exec->Init(symbol, [model_path UTF8String]); diff --git a/src/runtime/contrib/cublas/cublas.cc b/src/runtime/contrib/cublas/cublas.cc index 59367d17405d..ce69d4ca7bde 100644 --- a/src/runtime/contrib/cublas/cublas.cc +++ b/src/runtime/contrib/cublas/cublas.cc @@ -20,9 +20,9 @@ /*! * \file Use external cblas library call. */ -#include #include #include +#include #include "../cblas/gemm_common.h" #include "cublas_utils.h" @@ -152,19 +152,19 @@ inline void CallLtIgemm(TVMArgs args, TVMRetValue* ret, cublasLtHandle_t hdl) { int lda = M * K / (roundoff(K, 32) / 32); int ldb = K * N / (roundoff(K, 32) / 32); int ldc = M * N_out / (roundoff(N_out, 32) / 32); - CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); - CHECK(TypeEqual(A->dtype, B->dtype)); - CHECK(TypeMatch(A->dtype, kDLInt, 8)); - CHECK(TypeMatch(C->dtype, kDLInt, 32)); + ICHECK(TypeEqual(A->dtype, B->dtype)); + ICHECK(TypeMatch(A->dtype, kDLInt, 8)); + ICHECK(TypeMatch(C->dtype, kDLInt, 32)); - CHECK(CheckMixPrecisionType(A->dtype, C->dtype)) << "Unsupported data type"; + ICHECK(CheckMixPrecisionType(A->dtype, C->dtype)) << "Unsupported data type"; int32_t alpha = args.size() > 5 ? args[5] : 1; int32_t beta = args.size() > 6 ? args[6] : 0; cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL; @@ -214,27 +214,27 @@ inline void CallGemmEx(TVMArgs args, TVMRetValue* ret, cublasHandle_t hdl) { DLTensor* C = args[2]; bool transa = args[3]; bool transb = args[4]; - CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); - CHECK(TypeEqual(A->dtype, B->dtype)); + ICHECK(TypeEqual(A->dtype, B->dtype)); // C can never be transposed. - CHECK(!IsInPlaceTransposed(C)); + ICHECK(!IsInPlaceTransposed(C)); // Reversed strides indicates an in-place transpose operation. transa = IsInPlaceTransposed(A) ? !transa : transa; transb = IsInPlaceTransposed(B) ? 
!transb : transb; - CHECK(CheckMixPrecisionType(A->dtype, C->dtype)) << "Unsupported data type"; - CHECK(!TypeMatch(A->dtype, kDLInt, 8) || ColumnStride(A) % 4 == 0) + ICHECK(CheckMixPrecisionType(A->dtype, C->dtype)) << "Unsupported data type"; + ICHECK(!TypeMatch(A->dtype, kDLInt, 8) || ColumnStride(A) % 4 == 0) << "leading dimension must divide 4 for int8 gemm"; - CHECK(!TypeMatch(B->dtype, kDLInt, 8) || ColumnStride(B) % 4 == 0) + ICHECK(!TypeMatch(B->dtype, kDLInt, 8) || ColumnStride(B) % 4 == 0) << "leading dimension must divide 4 for int8 gemm"; double alpha = args.size() > 5 ? args[5] : 1.0; double beta = args.size() > 6 ? args[6] : 0.0; @@ -272,29 +272,29 @@ inline void CallBatchGemmEx(TVMArgs args, TVMRetValue* ret, cublasHandle_t hdl) DLTensor* C = args[2]; bool transa = args[3]; bool transb = args[4]; - CHECK_EQ(A->ndim, 3); - CHECK_EQ(B->ndim, 3); - CHECK_EQ(C->ndim, 3); + ICHECK_EQ(A->ndim, 3); + ICHECK_EQ(B->ndim, 3); + ICHECK_EQ(C->ndim, 3); int batch_size = BatchCount3D(A); - CHECK_EQ(BatchCount3D(B), batch_size); - CHECK_EQ(BatchCount3D(C), batch_size); - CHECK_EQ(ElementStride(A), 1); - CHECK_EQ(ElementStride(B), 1); - CHECK_EQ(ElementStride(C), 1); + ICHECK_EQ(BatchCount3D(B), batch_size); + ICHECK_EQ(BatchCount3D(C), batch_size); + ICHECK_EQ(ElementStride(A), 1); + ICHECK_EQ(ElementStride(B), 1); + ICHECK_EQ(ElementStride(C), 1); - CHECK(TypeEqual(A->dtype, B->dtype)); + ICHECK(TypeEqual(A->dtype, B->dtype)); // C can never be transposed. - CHECK(!IsInPlaceTransposed(C)); + ICHECK(!IsInPlaceTransposed(C)); // Reversed strides indicates an in-place transpose operation. transa = IsInPlaceTransposed(A) ? !transa : transa; transb = IsInPlaceTransposed(B) ? !transb : transb; - CHECK(CheckMixPrecisionType(A->dtype, C->dtype, false)) << "Unsupported data type"; - CHECK(!TypeMatch(A->dtype, kDLInt, 8) || ColumnStride(A) % 4 == 0) + ICHECK(CheckMixPrecisionType(A->dtype, C->dtype, false)) << "Unsupported data type"; + ICHECK(!TypeMatch(A->dtype, kDLInt, 8) || ColumnStride(A) % 4 == 0) << "leading dimension must divide 4 for int8 gemm"; - CHECK(!TypeMatch(B->dtype, kDLInt, 8) || ColumnStride(B) % 4 == 0) + ICHECK(!TypeMatch(B->dtype, kDLInt, 8) || ColumnStride(B) % 4 == 0) << "leading dimension must divide 4 for int8 gemm"; double alpha = args.size() > 5 ? args[5] : 1.0; double beta = args.size() > 6 ? 
args[6] : 0.0; @@ -339,8 +339,8 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cublas.matmul").set_body([](TVMArgs args, TVMRe CUBLASTryEnableTensorCore(entry_ptr->handle); if (TypeEqual(A->dtype, C->dtype)) { - CHECK(TypeMatch(A->dtype, kDLFloat, 16) || TypeMatch(A->dtype, kDLFloat, 32) || - TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 16) || TypeMatch(A->dtype, kDLFloat, 32) || + TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 16)) CallGemm(args, ret, CublasHgemmOp(entry_ptr->handle)); @@ -361,7 +361,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cublaslt.matmul").set_body([](TVMArgs args, TVM CUBLASTryEnableTensorCore(entry_ptr->handle); - CHECK(TypeMatch(A->dtype, kDLInt, 8)) << "Expects dtype to be int8\n"; + ICHECK(TypeMatch(A->dtype, kDLInt, 8)) << "Expects dtype to be int8\n"; cublasLtHandle_t ltHandle; CHECK_CUBLAS_ERROR(cublasLtCreate(<Handle)); CallLtIgemm(args, ret, ltHandle); @@ -377,8 +377,8 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cublas.batch_matmul").set_body([](TVMArgs args, CUBLASTryEnableTensorCore(entry_ptr->handle); if (TypeEqual(A->dtype, C->dtype)) { - CHECK(TypeMatch(A->dtype, kDLFloat, 16) || TypeMatch(A->dtype, kDLFloat, 32) || - TypeMatch(A->dtype, kDLFloat, 64)); + ICHECK(TypeMatch(A->dtype, kDLFloat, 16) || TypeMatch(A->dtype, kDLFloat, 32) || + TypeMatch(A->dtype, kDLFloat, 64)); if (TypeMatch(A->dtype, kDLFloat, 16)) CallBatchGemm(args, ret, CublasHgemmBatchOp(entry_ptr->handle)); diff --git a/src/runtime/contrib/cublas/cublas_utils.h b/src/runtime/contrib/cublas/cublas_utils.h index 5189c4f483a8..32c3b03ddbb0 100644 --- a/src/runtime/contrib/cublas/cublas_utils.h +++ b/src/runtime/contrib/cublas/cublas_utils.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #if CUDART_VERSION >= 10010 @@ -63,10 +63,10 @@ inline const char* GetCublasErrorString(int error) { } #ifndef CHECK_CUBLAS_ERROR -#define CHECK_CUBLAS_ERROR(fn) \ - do { \ - int error = static_cast(fn); \ - CHECK_EQ(error, CUBLAS_STATUS_SUCCESS) << "CUBLAS: " << GetCublasErrorString(error); \ +#define CHECK_CUBLAS_ERROR(fn) \ + do { \ + int error = static_cast(fn); \ + ICHECK_EQ(error, CUBLAS_STATUS_SUCCESS) << "CUBLAS: " << GetCublasErrorString(error); \ } while (0) // ; intentionally left off. #endif // CHECK_CUBLAS_ERROR diff --git a/src/runtime/contrib/cudnn/cudnn_utils.h b/src/runtime/contrib/cudnn/cudnn_utils.h index 1b4eb40f193f..528298b75187 100644 --- a/src/runtime/contrib/cudnn/cudnn_utils.h +++ b/src/runtime/contrib/cudnn/cudnn_utils.h @@ -25,18 +25,18 @@ #define TVM_RUNTIME_CONTRIB_CUDNN_CUDNN_UTILS_H_ #include -#include #include +#include #include "../../cuda/cuda_common.h" namespace tvm { namespace contrib { -#define CUDNN_CALL(func) \ - { \ - cudnnStatus_t e = (func); \ - CHECK_EQ(e, CUDNN_STATUS_SUCCESS) << "cuDNN: " << cudnnGetErrorString(e); \ +#define CUDNN_CALL(func) \ + { \ + cudnnStatus_t e = (func); \ + ICHECK_EQ(e, CUDNN_STATUS_SUCCESS) << "cuDNN: " << cudnnGetErrorString(e); \ } /*! 
\brief Convert DLTensor type to CuDNN type */ diff --git a/src/runtime/contrib/cudnn/softmax.cc b/src/runtime/contrib/cudnn/softmax.cc index ff6d6a1dbd81..648c9b633ea4 100644 --- a/src/runtime/contrib/cudnn/softmax.cc +++ b/src/runtime/contrib/cudnn/softmax.cc @@ -39,7 +39,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.cudnn.softmax.forward") int ndim = x->ndim; int64_t* shape = x->shape; if (axis < 0) axis += ndim; - CHECK(axis >= 0 && axis < ndim); + ICHECK(axis >= 0 && axis < ndim); CuDNNThreadEntry* entry_ptr = CuDNNThreadEntry::ThreadLocal(); entry_ptr->softmax_entry.data_type = CuDNNDataType::DLTypeToCuDNNType(x->dtype); diff --git a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc index bda9f1a44932..eef67a702d9c 100644 --- a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc +++ b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc @@ -54,7 +54,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase { void Init(const Array& consts) override { BuildEngine(); - CHECK_EQ(consts.size(), const_idx_.size()) + ICHECK_EQ(consts.size(), const_idx_.size()) << "The number of input constants must match the number of required."; // Set up constant entries for weights. @@ -98,7 +98,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase { for (size_t nid = 0; nid < nodes_.size(); ++nid) { const auto& node = nodes_[nid]; if (node.GetOpType() == "kernel") { - CHECK_EQ(node.GetOpType(), "kernel"); + ICHECK_EQ(node.GetOpType(), "kernel"); auto op_name = node.GetOpName(); if ("nn.conv2d" == op_name) { Conv2d(nid); @@ -137,12 +137,12 @@ class DNNLJSONRuntime : public JSONRuntimeBase { auto eid = EntryID(entry); // Since the DNNL memory has been created before calling this function, we assume the entry // has not yet been bound to another DNNL memory; otherwise it may leak memory. - CHECK_EQ(entry_out_mem_.count(eid), 0); + ICHECK_EQ(entry_out_mem_.count(eid), 0); // TODO(@comanic): Support other data types (i.e., int8). auto data_node = nodes_[entry.id_]; auto dltype = data_node.GetOpDataType()[entry.index_]; - CHECK_EQ(dltype.bits, 32); + ICHECK_EQ(dltype.bits, 32); entry_out_mem_[eid] = {mem, offset}; return entry_out_mem_[eid].first; @@ -214,11 +214,11 @@ class DNNLJSONRuntime : public JSONRuntimeBase { net_.push_back(conv); // Data memory. - CHECK_EQ(node.GetAttr>("data_layout")[0], "NCHW"); + ICHECK_EQ(node.GetAttr>("data_layout")[0], "NCHW"); auto conv2d_src_memory = BindDNNLMemory(data_entry, {src_dims, dt::f32, tag::nchw}); // Weight memory. - CHECK_EQ(node.GetAttr>("kernel_layout")[0], "OIHW"); + ICHECK_EQ(node.GetAttr>("kernel_layout")[0], "OIHW"); auto conv2d_weights_memory = BindDNNLMemory( weight_entry, {weights_dims, dt::f32, (groups > 1) ?
tag::goihw : tag::oihw}); @@ -343,7 +343,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase { auto relu_desc = dnnl::eltwise_forward::desc(dnnl::prop_kind::forward_inference, dnnl::algorithm::eltwise_relu, data_md, 0); auto relu_prim_desc = dnnl::eltwise_forward::primitive_desc(relu_desc, engine_); - CHECK(data_md == relu_prim_desc.dst_desc()); + ICHECK(data_md == relu_prim_desc.dst_desc()); auto relu = dnnl::eltwise_forward(relu_prim_desc); net_.push_back(relu); @@ -364,7 +364,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase { std::vector data_mds; std::vector data_memories; - CHECK_EQ(node.GetInputs().size(), 2U); + ICHECK_EQ(node.GetInputs().size(), 2U); for (auto entry : node.GetInputs()) { auto data_shape = nodes_[entry.id_].GetOpShape()[entry.index_]; dnnl::memory::desc data_md = GenDNNLMemDescByShape(data_shape, dt::f32); @@ -373,7 +373,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase { data_mds.push_back(data_md); data_memories.push_back(BindDNNLMemory(entry, data_md)); } - CHECK(data_dims[0] == data_dims[1]); + ICHECK(data_dims[0] == data_dims[1]); auto out_md = data_mds[0]; JSONGraphNodeEntry out_entry(nid, 0); auto out_memory = BindDNNLMemory(out_entry, out_md); diff --git a/src/runtime/contrib/json/json_node.h b/src/runtime/contrib/json/json_node.h index 6a07129bf006..77c289b04c6d 100644 --- a/src/runtime/contrib/json/json_node.h +++ b/src/runtime/contrib/json/json_node.h @@ -73,13 +73,13 @@ class JSONGraphNodeEntry { */ void Load(dmlc::JSONReader* reader) { reader->BeginArray(); - CHECK(reader->NextArrayItem()) << "invalid json format"; + ICHECK(reader->NextArrayItem()) << "invalid json format"; reader->Read(&id_); - CHECK(reader->NextArrayItem()) << "invalid json format"; + ICHECK(reader->NextArrayItem()) << "invalid json format"; reader->Read(&index_); if (reader->NextArrayItem()) { reader->Read(&version_); - CHECK(!reader->NextArrayItem()) << "invalid json format"; + ICHECK(!reader->NextArrayItem()) << "invalid json format"; } else { version_ = 0; } @@ -145,27 +145,27 @@ class JSONGraphNode { } else if (key == "dtype") { std::vector tmp; reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&tmp); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); for (const auto& it : tmp) { dtype_.push_back(tvm::runtime::String2DLDataType(it)); } } else if (key == "shape") { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&shape_); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); } else { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); std::vector tmp; reader->Read(&tmp); attrs_[key] = tmp; - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); } } - CHECK_EQ(shape_.size(), dtype_.size()); + ICHECK_EQ(shape_.size(), dtype_.size()); } /*! 
@@ -256,7 +256,7 @@ */ template T GetAttr(const std::string& key) const { - CHECK_GT(attrs_.count(key), 0U) << "Key: " << key << " is not found"; + ICHECK_GT(attrs_.count(key), 0U) << "Key: " << key << " is not found"; return dmlc::get(attrs_.at(key)); } diff --git a/src/runtime/contrib/json/json_runtime.h b/src/runtime/contrib/json/json_runtime.h index 9eb7fcd2f689..3ae652ccaf24 100644 --- a/src/runtime/contrib/json/json_runtime.h +++ b/src/runtime/contrib/json/json_runtime.h @@ -78,7 +78,7 @@ class JSONRuntimeBase : public ModuleNode { [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->const_names_; }); } else if (this->symbol_name_ == name) { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK(this->initialized_) << "The module has not been initialized"; + ICHECK(this->initialized_) << "The module has not been initialized"; // Bind argument tensors to data entries. this->SetInputOutputBuffers(args); @@ -88,7 +88,7 @@ class JSONRuntimeBase : public ModuleNode { } else if ("__init_" + this->symbol_name_ == name) { // The function to initialize constant tensors. return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.size(), 1U); + ICHECK_EQ(args.size(), 1U); this->Init(args[0]); this->initialized_ = true; *rv = 0; @@ -119,9 +119,9 @@ class JSONRuntimeBase : public ModuleNode { std::string graph_json; std::vector consts; // Load the symbol - CHECK(stream->Read(&symbol)) << "Loading symbol name failed"; - CHECK(stream->Read(&graph_json)) << "Loading graph json failed"; - CHECK(stream->Read(&consts)) << "Loading the const name list failed"; + ICHECK(stream->Read(&symbol)) << "Loading symbol name failed"; + ICHECK(stream->Read(&graph_json)) << "Loading graph json failed"; + ICHECK(stream->Read(&consts)) << "Loading the const name list failed"; Array const_names; for (const auto& it : consts) { const_names.push_back(it); @@ -146,13 +146,13 @@ class JSONRuntimeBase : public ModuleNode { * \param args The packed args. */ void SetInputOutputBuffers(const TVMArgs& args) { - CHECK_EQ(args.size(), input_var_eid_.size() + outputs_.size()) + ICHECK_EQ(args.size(), input_var_eid_.size() + outputs_.size()) << "Found mismatch in the number of provided data entries and required."; for (size_t i = 0; i < static_cast(args.size()); i++) { auto eid = i < input_var_eid_.size() ?
input_var_eid_[i] : EntryID(outputs_[i - input_var_eid_.size()]); - CHECK(args[i].type_code() == kTVMNDArrayHandle || args[i].type_code() == kTVMDLTensorHandle) + ICHECK(args[i].type_code() == kTVMNDArrayHandle || args[i].type_code() == kTVMDLTensorHandle) << "Expect NDArray or DLTensor as inputs"; const DLTensor* arg; @@ -183,23 +183,23 @@ class JSONRuntimeBase : public ModuleNode { uint32_t nid = input_nodes_[i]; std::string name = nodes_[nid].name_; if (nodes_[nid].op_type_ == "input") { - CHECK_EQ(nodes_[nid].GetOpShape().size(), nodes_[nid].GetOpDataType().size()); + ICHECK_EQ(nodes_[nid].GetOpShape().size(), nodes_[nid].GetOpDataType().size()); for (size_t j = 0; j < nodes_[nid].GetOpShape().size(); ++j) { input_var_eid_.push_back(EntryID(nid, j)); } } else { - CHECK_EQ(nodes_[nid].op_type_, "const"); + ICHECK_EQ(nodes_[nid].op_type_, "const"); auto pos = std::find(std::begin(const_names_), std::end(const_names_), name); - CHECK(pos != std::end(const_names_)) << "Found non-existent constant: " << name; + ICHECK(pos != std::end(const_names_)) << "Found non-existent constant: " << name; const_idx_.push_back(nid); consts.push_back(name); } } - CHECK_EQ(consts.size(), const_names_.size()) + ICHECK_EQ(consts.size(), const_names_.size()) << "Found mismatch for the number of constants in the graph and required."; for (size_t i = 0; i < consts.size(); i++) { - CHECK_EQ(consts[i], const_names_[i]) + ICHECK_EQ(consts[i], const_names_[i]) << "The position of constant in the graph must be the same as the required."; } diff --git a/src/runtime/contrib/miopen/miopen_utils.h b/src/runtime/contrib/miopen/miopen_utils.h index 4dec2ad710ba..9982f0914f6b 100644 --- a/src/runtime/contrib/miopen/miopen_utils.h +++ b/src/runtime/contrib/miopen/miopen_utils.h @@ -24,9 +24,9 @@ #ifndef TVM_RUNTIME_CONTRIB_MIOPEN_MIOPEN_UTILS_H_ #define TVM_RUNTIME_CONTRIB_MIOPEN_MIOPEN_UTILS_H_ -#include #include #include +#include #include @@ -38,10 +38,10 @@ namespace miopen { std::string miopenGetErrorString(int error_code); -#define MIOPEN_CALL(func) \ - { \ - miopenStatus_t e = (func); \ - CHECK_EQ(e, miopenStatusSuccess) << "miopen error: " << miopenGetErrorString(e); \ +#define MIOPEN_CALL(func) \ + { \ + miopenStatus_t e = (func); \ + ICHECK_EQ(e, miopenStatusSuccess) << "miopen error: " << miopenGetErrorString(e); \ } struct ConvEntry { diff --git a/src/runtime/contrib/mps/conv.mm b/src/runtime/contrib/mps/conv.mm index b598014f0267..3b16f0820d64 100644 --- a/src/runtime/contrib/mps/conv.mm +++ b/src/runtime/contrib/mps/conv.mm @@ -80,15 +80,15 @@ int pad = args[3]; int stride = args[4]; - CHECK_EQ(data->ndim, 4); - CHECK_EQ(weight->ndim, 4); - CHECK_EQ(output->ndim, 4); - CHECK(output->strides == nullptr); - CHECK(weight->strides == nullptr); - CHECK(data->strides == nullptr); - - CHECK_EQ(data->shape[0], 1); - CHECK_EQ(output->shape[0], 1); + ICHECK_EQ(data->ndim, 4); + ICHECK_EQ(weight->ndim, 4); + ICHECK_EQ(output->ndim, 4); + ICHECK(output->strides == nullptr); + ICHECK(weight->strides == nullptr); + ICHECK(data->strides == nullptr); + + ICHECK_EQ(data->shape[0], 1); + ICHECK_EQ(output->shape[0], 1); int oCh = weight->shape[0]; int kH = weight->shape[1]; diff --git a/src/runtime/contrib/mps/gemm.mm b/src/runtime/contrib/mps/gemm.mm index 109c952ff0c4..c1d80dbed7f3 100644 --- a/src/runtime/contrib/mps/gemm.mm +++ b/src/runtime/contrib/mps/gemm.mm @@ -31,19 +31,19 @@ bool transa = args[3]; bool transb = args[4]; // call gemm for simple compact code. 
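The strides checks below rely on the DLPack convention that a null strides pointer marks a compact, row-major tensor. The same test written out explicitly:

    #include <dlpack/dlpack.h>

    // DLPack permits strides == nullptr, which declares the tensor compact
    // and row-major, so offsets can be computed from the shape alone.
    inline bool IsCompact(const DLTensor* t) { return t->strides == nullptr; }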
- CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); - CHECK(C->strides == nullptr); - CHECK(B->strides == nullptr); - CHECK(A->strides == nullptr); - CHECK(TypeMatch(A->dtype, kDLFloat, 32)); - CHECK(TypeMatch(B->dtype, kDLFloat, 32)); - CHECK(TypeMatch(C->dtype, kDLFloat, 32)); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); + ICHECK(C->strides == nullptr); + ICHECK(B->strides == nullptr); + ICHECK(A->strides == nullptr); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(B->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(C->dtype, kDLFloat, 32)); // Get Metal device API MetalThreadEntry* entry_ptr = MetalThreadEntry::ThreadLocal(); - // CHECK_EQ(A->ctx, B->ctx); - // CHECK_EQ(A->ctx, C->ctx); + // ICHECK_EQ(A->ctx, B->ctx); + // ICHECK_EQ(A->ctx, C->ctx); id dev = entry_ptr->metal_api->GetDevice(A->ctx); id queue = entry_ptr->metal_api->GetCommandQueue(A->ctx); id cb = [queue commandBuffer]; @@ -51,7 +51,7 @@ NSUInteger N = B->shape[1 - (transb ? 1 : 0)]; NSUInteger K = B->shape[0 + (transb ? 1 : 0)]; - CHECK_EQ(A->shape[1 - (transa ? 1 : 0)], K); + ICHECK_EQ(A->shape[1 - (transa ? 1 : 0)], K); // mps a MPSDataType dtype = MPSType::DLTypeToMPSType(A->dtype); MPSMatrixDescriptor* descA = @@ -86,7 +86,7 @@ interiorColumns:K alpha:1.0f beta:0.0f]; - CHECK(sgemm != nil); + ICHECK(sgemm != nil); [sgemm encodeToCommandBuffer:cb leftMatrix:matrixA rightMatrix:matrixB resultMatrix:matrixC]; [cb commit]; }); diff --git a/src/runtime/contrib/mps/mps_utils.h b/src/runtime/contrib/mps/mps_utils.h index 170451ea385b..d1c49732318a 100644 --- a/src/runtime/contrib/mps/mps_utils.h +++ b/src/runtime/contrib/mps/mps_utils.h @@ -25,11 +25,11 @@ #define TVM_RUNTIME_CONTRIB_MPS_MPS_UTILS_H_ #import -#include #include #include #include #include +#include #include diff --git a/src/runtime/contrib/nnpack/convolution.cc b/src/runtime/contrib/nnpack/convolution.cc index 54c9ea4f969b..b3ea6c891d43 100644 --- a/src/runtime/contrib/nnpack/convolution.cc +++ b/src/runtime/contrib/nnpack/convolution.cc @@ -20,11 +20,11 @@ /*! * \file Use external nnpack library call. 
*/ -#include #include #include #include #include +#include #include "nnpack_utils.h" @@ -36,7 +36,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference") .set_body([](TVMArgs args, TVMRetValue* ret) { NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal(); static std::once_flag flag; - std::call_once(flag, []() { CHECK_EQ(nnp_initialize(), nnp_status_success); }); + std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); }); DLTensor* input = args[0]; DLTensor* kernel = args[1]; DLTensor* bias = nullptr; @@ -52,36 +52,36 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference") uint64_t algo_ = args[11]; nnp_convolution_algorithm algo = static_cast(algo_); - CHECK_EQ(input->ndim, 4); - CHECK_EQ(kernel->ndim, 4); + ICHECK_EQ(input->ndim, 4); + ICHECK_EQ(kernel->ndim, 4); if (bias) { - CHECK_EQ(bias->ndim, 1); + ICHECK_EQ(bias->ndim, 1); } - CHECK_EQ(output->ndim, 4); - CHECK_EQ(input->shape[1], kernel->shape[1]); - CHECK_EQ(input->shape[0], output->shape[0]); + ICHECK_EQ(output->ndim, 4); + ICHECK_EQ(input->shape[1], kernel->shape[1]); + ICHECK_EQ(input->shape[0], output->shape[0]); size_t input_channels = input->shape[1]; - CHECK_EQ(output->shape[1], kernel->shape[0]); + ICHECK_EQ(output->shape[1], kernel->shape[0]); if (bias) { - CHECK_EQ(output->shape[1], bias->shape[0]); + ICHECK_EQ(output->shape[1], bias->shape[0]); } size_t output_channels = output->shape[1]; nnp_size input_size{static_cast(input->shape[2]), static_cast(input->shape[3])}; nnp_size kernel_size{static_cast(kernel->shape[2]), static_cast(kernel->shape[3])}; - CHECK(input->strides == nullptr); - CHECK(kernel->strides == nullptr); + ICHECK(input->strides == nullptr); + ICHECK(kernel->strides == nullptr); if (bias) { - CHECK(bias->strides == nullptr); + ICHECK(bias->strides == nullptr); } - CHECK(TypeMatch(input->dtype, kDLFloat, 32)); - CHECK(TypeMatch(kernel->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(input->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(kernel->dtype, kDLFloat, 32)); if (bias) { - CHECK(TypeMatch(bias->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(bias->dtype, kDLFloat, 32)); } - CHECK(TypeMatch(output->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(output->dtype, kDLFloat, 32)); // Allocate a zero-bias if we don't pass one in. 
std::unique_ptr> zero_bias; @@ -94,7 +94,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference") algo, nnp_convolution_transform_strategy_compute, input_channels, output_channels, input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr, nullptr, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); // Division with rounding up, in case size is not multiple of sizeof(float) const size_t workspace_elements = (workspace_size + sizeof(float) - 1) / sizeof(float); @@ -105,7 +105,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference") DeviceAPI* cpu_api = DeviceAPI::Get(ctx); void* workspace_buffer = cpu_api->AllocWorkspace(ctx, workspace_elements * sizeof(float), type_hint); - CHECK(workspace_buffer != nullptr); + ICHECK(workspace_buffer != nullptr); for (auto n = 0; n < input->shape[0]; ++n) { nnp_status status = nnp_convolution_inference( @@ -120,7 +120,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference") workspace_buffer, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); } cpu_api->FreeWorkspace(ctx, workspace_buffer); }); @@ -129,7 +129,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_tra .set_body([](TVMArgs args, TVMRetValue* ret) { NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal(); static std::once_flag flag; - std::call_once(flag, []() { CHECK_EQ(nnp_initialize(), nnp_status_success); }); + std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); }); DLTensor* input = args[0]; DLTensor* transformed_kernel = args[1]; DLTensor* bias = nullptr; @@ -145,32 +145,32 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_tra uint64_t algo_ = args[11]; nnp_convolution_algorithm algo = static_cast(algo_); - CHECK_EQ(input->ndim, 4); + ICHECK_EQ(input->ndim, 4); if (bias) { - CHECK_EQ(bias->ndim, 1); + ICHECK_EQ(bias->ndim, 1); } - CHECK_EQ(output->ndim, 4); - CHECK_EQ(input->shape[0], output->shape[0]); + ICHECK_EQ(output->ndim, 4); + ICHECK_EQ(input->shape[0], output->shape[0]); size_t input_channels = input->shape[1]; if (bias) { - CHECK_EQ(output->shape[1], bias->shape[0]); + ICHECK_EQ(output->shape[1], bias->shape[0]); } size_t output_channels = output->shape[1]; nnp_size input_size{static_cast(input->shape[2]), static_cast(input->shape[3])}; nnp_size kernel_size{3, 3}; - CHECK(input->strides == nullptr); - CHECK(transformed_kernel->strides == nullptr); + ICHECK(input->strides == nullptr); + ICHECK(transformed_kernel->strides == nullptr); if (bias) { - CHECK(bias->strides == nullptr); + ICHECK(bias->strides == nullptr); } - CHECK(TypeMatch(input->dtype, kDLFloat, 32)); - CHECK(TypeMatch(transformed_kernel->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(input->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(transformed_kernel->dtype, kDLFloat, 32)); if (bias) { - CHECK(TypeMatch(bias->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(bias->dtype, kDLFloat, 32)); } - CHECK(TypeMatch(output->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(output->dtype, kDLFloat, 32)); // Allocate a zero-bias if we don't pass one in. 
std::unique_ptr> zero_bias; @@ -183,7 +183,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_tra algo, nnp_convolution_transform_strategy_reuse, input_channels, output_channels, input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr, nullptr, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); // Division with rounding up, in case size is not multiple of sizeof(float) const size_t workspace_elements = (workspace_size + sizeof(float) - 1) / sizeof(float); @@ -194,7 +194,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_tra DeviceAPI* cpu_api = DeviceAPI::Get(ctx); void* workspace_buffer = cpu_api->AllocWorkspace(ctx, workspace_elements * sizeof(float), type_hint); - CHECK(workspace_buffer != nullptr); + ICHECK(workspace_buffer != nullptr); for (auto n = 0; n < input->shape[0]; ++n) { nnp_status status = nnp_convolution_inference( @@ -208,7 +208,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_tra n * output->shape[1] * output->shape[2] * output->shape[3], workspace_buffer, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); } cpu_api->FreeWorkspace(ctx, workspace_buffer); @@ -218,7 +218,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_weight_transform") .set_body([](TVMArgs args, TVMRetValue* ret) { NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal(); static std::once_flag flag; - std::call_once(flag, []() { CHECK_EQ(nnp_initialize(), nnp_status_success); }); + std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); }); DLTensor* kernel = args[0]; DLTensor* transformed_kernel = args[1]; // Dummy sizes @@ -231,15 +231,15 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_weight_transform") uint64_t algo_ = args[3]; nnp_convolution_algorithm algo = static_cast(algo_); - CHECK_EQ(kernel->ndim, 4); + ICHECK_EQ(kernel->ndim, 4); size_t input_channels = kernel->shape[1]; size_t output_channels = kernel->shape[0]; - CHECK_EQ(kernel->shape[2], 3); - CHECK_EQ(kernel->shape[3], 3); + ICHECK_EQ(kernel->shape[2], 3); + ICHECK_EQ(kernel->shape[3], 3); nnp_size kernel_size{static_cast(kernel->shape[2]), static_cast(kernel->shape[3])}; - CHECK(kernel->strides == nullptr); - CHECK(TypeMatch(kernel->dtype, kDLFloat, 32)); + ICHECK(kernel->strides == nullptr); + ICHECK(TypeMatch(kernel->dtype, kDLFloat, 32)); size_t transformed_kernel_size = 0; nnp_status status; @@ -248,9 +248,9 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_weight_transform") input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr, nullptr, &transformed_kernel_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); - CHECK_LE(transformed_kernel_size, GetDataSize(*transformed_kernel)); + ICHECK_LE(transformed_kernel_size, GetDataSize(*transformed_kernel)); status = nnp_convolution_inference( algo, nnp_convolution_transform_strategy_precompute, input_channels, output_channels, @@ -258,7 +258,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_weight_transform") static_cast(kernel->data), nullptr, nullptr, static_cast(transformed_kernel->data), &transformed_kernel_size, 
nnp_activation_identity, nullptr, entry->threadpool, nullptr); - CHECK_EQ(status, nnp_status_success); + ICHECK_EQ(status, nnp_status_success); }); } // namespace contrib } // namespace tvm diff --git a/src/runtime/contrib/nnpack/fully_connected.cc b/src/runtime/contrib/nnpack/fully_connected.cc index 543d23958633..8b72eb38e08c 100644 --- a/src/runtime/contrib/nnpack/fully_connected.cc +++ b/src/runtime/contrib/nnpack/fully_connected.cc @@ -20,10 +20,10 @@ /*! * \file Use external nnpack library call. */ -#include #include #include #include +#include #include "nnpack_utils.h" @@ -42,17 +42,17 @@ TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.fully_connected_inference") DLTensor* C = args[2]; NNPackConfig(args[3]); - CHECK_EQ(A->ndim, 1); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 1); - CHECK_EQ(B->shape[0], C->shape[0]); - CHECK_EQ(B->shape[1], A->shape[0]); - CHECK(C->strides == nullptr); - CHECK(B->strides == nullptr); - CHECK(A->strides == nullptr); - CHECK(TypeMatch(A->dtype, kDLFloat, 32)); - CHECK(TypeMatch(B->dtype, kDLFloat, 32)); - CHECK(TypeMatch(C->dtype, kDLFloat, 32)); + ICHECK_EQ(A->ndim, 1); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 1); + ICHECK_EQ(B->shape[0], C->shape[0]); + ICHECK_EQ(B->shape[1], A->shape[0]); + ICHECK(C->strides == nullptr); + ICHECK(B->strides == nullptr); + ICHECK(A->strides == nullptr); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(B->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(C->dtype, kDLFloat, 32)); nnp_fully_connected_inference(B->shape[1], B->shape[0], static_cast(A->data), static_cast(B->data), static_cast(C->data), diff --git a/src/runtime/contrib/nnpack/nnpack_utils.cc b/src/runtime/contrib/nnpack/nnpack_utils.cc index 91cf865128e9..2fd6f69bf20c 100644 --- a/src/runtime/contrib/nnpack/nnpack_utils.cc +++ b/src/runtime/contrib/nnpack/nnpack_utils.cc @@ -35,7 +35,7 @@ NNPackThreadLocalEntry* NNPackThreadLocalEntry::ThreadLocal() { bool NNPackConfig(uint64_t nthreads) { NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal(); if (entry->threadpool && pthreadpool_get_threads_count(entry->threadpool) == nthreads) { - CHECK_NE(nthreads, 1); + ICHECK_NE(nthreads, 1); return true; } if (entry->threadpool) { @@ -46,7 +46,7 @@ bool NNPackConfig(uint64_t nthreads) { if (nthreads == 1) { // a null threadpool means the function is invoked on the calling thread, // which is the desired logic for nthreads == 1 - CHECK(!entry->threadpool); + ICHECK(!entry->threadpool); return true; } diff --git a/src/runtime/contrib/nnpack/nnpack_utils.h b/src/runtime/contrib/nnpack/nnpack_utils.h index bbb0d16bc868..231309baaa8e 100644 --- a/src/runtime/contrib/nnpack/nnpack_utils.h +++ b/src/runtime/contrib/nnpack/nnpack_utils.h @@ -22,11 +22,11 @@ */ #ifndef TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_ #define TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_ -#include #include #include #include #include +#include namespace tvm { namespace contrib { diff --git a/src/runtime/contrib/onnx/onnx_module.cc b/src/runtime/contrib/onnx/onnx_module.cc index 9574b8674c8b..b235d63dbc58 100644 --- a/src/runtime/contrib/onnx/onnx_module.cc +++ b/src/runtime/contrib/onnx/onnx_module.cc @@ -53,8 +53,8 @@ class ONNXSourceModuleNode : public runtime::ModuleNode { std::string GetSource(const std::string& format) final { return code_; } void SaveToFile(const std::string& path, const std::string& format) final { - CHECK_EQ(format, "onnx") << "Can only save to onnx format"; - CHECK_NE(code_.length(), 0); + ICHECK_EQ(format, "onnx") << "Can only save to onnx format"; + 
ICHECK_NE(code_.length(), 0); const PackedFunc* to_onnx_ = runtime::Registry::Get("relay.ext.onnx.save_to_file"); (*to_onnx_)(code_, path, format); } diff --git a/src/runtime/contrib/random/mt_random_engine.cc b/src/runtime/contrib/random/mt_random_engine.cc index 8c20f0700ee7..49bc056dcafb 100644 --- a/src/runtime/contrib/random/mt_random_engine.cc +++ b/src/runtime/contrib/random/mt_random_engine.cc @@ -21,9 +21,9 @@ * \file random/mt_random_engine.cc * \brief mt19937 random engine */ -#include #include #include +#include #include #include @@ -71,8 +71,8 @@ class RandomEngine { * \brief Fills a tensor with values drawn from Unif(low, high) */ void SampleUniform(DLTensor* data, float low, float high) { - CHECK_GT(high, low) << "high must be bigger than low"; - CHECK(data->strides == nullptr); + ICHECK_GT(high, low) << "high must be bigger than low"; + ICHECK(data->strides == nullptr); DLDataType dtype = data->dtype; int64_t size = 1; @@ -80,7 +80,7 @@ class RandomEngine { size *= data->shape[i]; } - CHECK(dtype.code == kDLFloat && dtype.bits == 32 && dtype.lanes == 1); + ICHECK(dtype.code == kDLFloat && dtype.bits == 32 && dtype.lanes == 1); if (data->ctx.device_type == kDLCPU) { std::uniform_real_distribution uniform_dist(low, high); @@ -95,8 +95,8 @@ class RandomEngine { * \brief Fills a tensor with values drawn from Normal(loc, scale**2) */ void SampleNormal(DLTensor* data, float loc, float scale) { - CHECK_GT(scale, 0) << "standard deviation must be positive"; - CHECK(data->strides == nullptr); + ICHECK_GT(scale, 0) << "standard deviation must be positive"; + ICHECK(data->strides == nullptr); DLDataType dtype = data->dtype; int64_t size = 1; @@ -104,7 +104,7 @@ class RandomEngine { size *= data->shape[i]; } - CHECK(dtype.code == kDLFloat && dtype.bits == 32 && dtype.lanes == 1); + ICHECK(dtype.code == kDLFloat && dtype.bits == 32 && dtype.lanes == 1); if (data->ctx.device_type == kDLCPU) { std::normal_distribution normal_dist(loc, scale); diff --git a/src/runtime/contrib/random/random.cc b/src/runtime/contrib/random/random.cc index 14bdd267d38c..edcd20883369 100644 --- a/src/runtime/contrib/random/random.cc +++ b/src/runtime/contrib/random/random.cc @@ -20,10 +20,10 @@ /*! * \file External random functions for tensor. */ -#include #include #include #include +#include #include @@ -73,8 +73,8 @@ TVM_REGISTER_GLOBAL("tvm.contrib.random.randint").set_body([](TVMArgs args, TVMR int64_t low = args[0]; int64_t high = args[1]; DLTensor* out = args[2]; - CHECK_GT(high, low) << "high must be bigger than low"; - CHECK(out->strides == nullptr); + ICHECK_GT(high, low) << "high must be bigger than low"; + ICHECK(out->strides == nullptr); DLDataType dtype = out->dtype; int64_t size = 1; diff --git a/src/runtime/contrib/rocblas/rocblas.cc b/src/runtime/contrib/rocblas/rocblas.cc index bca00a591d48..dca1ebc6ed83 100644 --- a/src/runtime/contrib/rocblas/rocblas.cc +++ b/src/runtime/contrib/rocblas/rocblas.cc @@ -22,9 +22,9 @@ */ #include "rocblas.h" -#include #include #include +#include namespace tvm { namespace contrib { @@ -56,15 +56,15 @@ TVM_REGISTER_GLOBAL("tvm.contrib.rocblas.matmul").set_body([](TVMArgs args, TVMR bool transa = args[3]; bool transb = args[4]; // call gemm for simple compact code. 
- CHECK_EQ(A->ndim, 2); - CHECK_EQ(B->ndim, 2); - CHECK_EQ(C->ndim, 2); - CHECK(C->strides == nullptr); - CHECK(B->strides == nullptr); - CHECK(A->strides == nullptr); - CHECK(TypeMatch(A->dtype, kDLFloat, 32)); - CHECK(TypeMatch(B->dtype, kDLFloat, 32)); - CHECK(TypeMatch(C->dtype, kDLFloat, 32)); + ICHECK_EQ(A->ndim, 2); + ICHECK_EQ(B->ndim, 2); + ICHECK_EQ(C->ndim, 2); + ICHECK(C->strides == nullptr); + ICHECK(B->strides == nullptr); + ICHECK(A->strides == nullptr); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(B->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(C->dtype, kDLFloat, 32)); rocblas_handle handle; CHECK_ROCBLAS_ERROR(rocblas_create_handle(&handle)); @@ -97,12 +97,12 @@ TVM_REGISTER_GLOBAL("tvm.contrib.rocblas.batch_matmul") bool transa = args[3]; bool transb = args[4]; // call gemm for simple compact code. - CHECK_EQ(A->ndim, 3); - CHECK_EQ(B->ndim, 3); - CHECK_EQ(C->ndim, 3); - CHECK(TypeMatch(A->dtype, kDLFloat, 32)); - CHECK(TypeMatch(B->dtype, kDLFloat, 32)); - CHECK(TypeMatch(C->dtype, kDLFloat, 32)); + ICHECK_EQ(A->ndim, 3); + ICHECK_EQ(B->ndim, 3); + ICHECK_EQ(C->ndim, 3); + ICHECK(TypeMatch(A->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(B->dtype, kDLFloat, 32)); + ICHECK(TypeMatch(C->dtype, kDLFloat, 32)); rocblas_handle handle; CHECK_ROCBLAS_ERROR(rocblas_create_handle(&handle)); diff --git a/src/runtime/contrib/sort/sort.cc b/src/runtime/contrib/sort/sort.cc index 9543e4b4c64e..31cf38d7d7a5 100644 --- a/src/runtime/contrib/sort/sort.cc +++ b/src/runtime/contrib/sort/sort.cc @@ -68,15 +68,15 @@ TVM_REGISTER_GLOBAL("tvm.contrib.sort.argsort_nms").set_body([](TVMArgs args, TV } // Currently only supports input dtype to be float32. - CHECK_EQ(dtype.code, 2) << "Currently only supports input dtype " - "to be float."; + ICHECK_EQ(dtype.code, 2) << "Currently only supports input dtype " + "to be float."; #if (__ARM_FEATURE_FP16_SCALAR_ARITHMETIC != 1) - CHECK_EQ(dtype.bits, 32) << "Currently only supports input dtype " - "to be float32."; + ICHECK_EQ(dtype.bits, 32) << "Currently only supports input dtype " + "to be float32."; #endif - CHECK_LT(axis, input->ndim) << "Axis out of boundary for " - "input ndim " - << input->ndim; + ICHECK_LT(axis, input->ndim) << "Axis out of boundary for " + "input ndim " + << input->ndim; for (int i = 0; i < input->ndim; ++i) { if (i < axis) { @@ -175,9 +175,9 @@ TVM_REGISTER_GLOBAL("tvm.contrib.sort.argsort").set_body([](TVMArgs args, TVMRet if (axis < 0) { axis = input->ndim + axis; } - CHECK_LT(axis, input->ndim) << "Axis out of boundary for " - "input ndim " - << input->ndim; + ICHECK_LT(axis, input->ndim) << "Axis out of boundary for " + "input ndim " + << input->ndim; auto data_dtype = DLDataType2String(input->dtype); auto out_dtype = DLDataType2String(output->dtype); @@ -322,7 +322,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.sort.topk").set_body([](TVMArgs args, TVMRetVal if (axis < 0) { axis = input->ndim + axis; } - CHECK(axis >= 0 && axis < input->ndim) << "Axis out of boundary for input ndim " << input->ndim; + ICHECK(axis >= 0 && axis < input->ndim) << "Axis out of boundary for input ndim " << input->ndim; auto data_dtype = DLDataType2String(input->dtype); auto out_dtype = (indices_out == nullptr) ? 
"int64" : DLDataType2String(indices_out->dtype); diff --git a/src/runtime/contrib/tensorrt/tensorrt_builder.cc b/src/runtime/contrib/tensorrt/tensorrt_builder.cc index bf0dbfe724ed..d308200eba05 100644 --- a/src/runtime/contrib/tensorrt/tensorrt_builder.cc +++ b/src/runtime/contrib/tensorrt/tensorrt_builder.cc @@ -67,7 +67,7 @@ void TensorRTBuilder::AddInput(int nid, const JSONGraphNode& node) { auto node_name = node.GetOpName(); auto shapes = node.GetOpShape(); auto dtypes = node.GetOpDataType(); - CHECK_EQ(shapes.size(), dtypes.size()); + ICHECK_EQ(shapes.size(), dtypes.size()); node_output_map_[nid] = {}; for (size_t i = 0; i < shapes.size(); ++i) { const std::string name = node_name + "_" + std::to_string(i); @@ -77,7 +77,7 @@ void TensorRTBuilder::AddInput(int nid, const JSONGraphNode& node) { shape.erase(shape.begin()); } nvinfer1::Dims dims = VectorToTrtDims(shape); - CHECK(TypeMatch(dtypes[i], kDLFloat, 32)) << "Only FP32 inputs are supported."; + ICHECK(TypeMatch(dtypes[i], kDLFloat, 32)) << "Only FP32 inputs are supported."; auto input_tensor = network_->addInput(name.c_str(), nvinfer1::DataType::kFLOAT, dims); node_output_map_[nid].push_back(TensorRTOpInput(input_tensor)); network_input_names_.push_back(input_tensor->getName()); @@ -96,7 +96,7 @@ void TensorRTBuilder::AddConstant(int nid, const DLTensor* data) { void TensorRTBuilder::AddOutput(const JSONGraphNodeEntry& node) { auto it = node_output_map_.find(node.id_); - CHECK(it != node_output_map_.end()) << "Output was not found."; + ICHECK(it != node_output_map_.end()) << "Output was not found."; auto out_tensor = it->second[node.index_].tensor; std::string name = "tensorrt_output_" + std::to_string(network_output_names_.size()); out_tensor->setName(name.c_str()); @@ -108,14 +108,14 @@ void TensorRTBuilder::AddLayer(int nid, const JSONGraphNode& node) { TensorRTOpConverterParams params(network_, node, &trt_weights_); // Look up converter. auto it = GetOpConverters()->find(params.op_name); - CHECK(it != GetOpConverters()->end()) + ICHECK(it != GetOpConverters()->end()) << "Unsupported operator conversion to TRT, op name: " << params.op_name; const auto converter = it->second; // Get inputs. for (size_t i = 0; i < node.GetInputs().size(); ++i) { auto in_node = node.GetInputs()[i]; auto it = node_output_map_.find(in_node.id_); - CHECK(it != node_output_map_.end()) << "Input was not found."; + ICHECK(it != node_output_map_.end()) << "Input was not found."; auto input = it->second[in_node.index_]; if (!converter->variable_input_count) { if (converter->input_types[i] == kTensor && input.type == kWeight) { @@ -127,7 +127,7 @@ void TensorRTBuilder::AddLayer(int nid, const JSONGraphNode& node) { } params.inputs.push_back(input); } - CHECK(converter->variable_input_count || converter->input_types.size() == params.inputs.size()) + ICHECK(converter->variable_input_count || converter->input_types.size() == params.inputs.size()) << "Op expected a different number of inputs."; // Convert op to TRT. 
@@ -165,7 +165,7 @@ TensorRTEngineAndContext TensorRTBuilder::BuildEngine() { #else nvinfer1::ICudaEngine* engine = builder_->buildCudaEngine(*network_); #endif - CHECK_EQ(engine->getNbBindings(), network_input_names_.size() + network_output_names_.size()); + ICHECK_EQ(engine->getNbBindings(), network_input_names_.size() + network_output_names_.size()); nvinfer1::IExecutionContext* context = engine->createExecutionContext(); CleanUp(); return {engine, context, network_input_names_, network_output_names_}; @@ -173,9 +173,9 @@ TensorRTEngineAndContext TensorRTBuilder::BuildEngine() { nvinfer1::Weights TensorRTBuilder::GetDLTensorAsWeights(const DLTensor* dptr, DLDeviceType src_device) { - CHECK_EQ(dptr->ctx.device_type, src_device); - CHECK(static_cast(dptr->dtype.code) == kDLFloat || - static_cast(dptr->dtype.code) == kDLInt); + ICHECK_EQ(dptr->ctx.device_type, src_device); + ICHECK(static_cast(dptr->dtype.code) == kDLFloat || + static_cast(dptr->dtype.code) == kDLInt); const auto trt_dtype = static_cast(dptr->dtype.code) == kDLFloat ? nvinfer1::DataType::kFLOAT : nvinfer1::DataType::kINT32; @@ -185,12 +185,12 @@ nvinfer1::Weights TensorRTBuilder::GetDLTensorAsWeights(const DLTensor* dptr, for (tvm_index_t i = 0; i < dptr->ndim; ++i) { count *= dptr->shape[i]; } - CHECK_EQ(count * 4, weight_bytes); + ICHECK_EQ(count * 4, weight_bytes); weight.count = count; weight.values = new float[count]; - CHECK_EQ(TVMArrayCopyToBytes(const_cast(dptr), const_cast(weight.values), - weight_bytes), - 0) + ICHECK_EQ(TVMArrayCopyToBytes(const_cast(dptr), const_cast(weight.values), + weight_bytes), + 0) << TVMGetLastError(); trt_weights_.push_back(weight); return weight; diff --git a/src/runtime/contrib/tensorrt/tensorrt_logger.h b/src/runtime/contrib/tensorrt/tensorrt_logger.h index 53b6dfeea763..087cb010189c 100644 --- a/src/runtime/contrib/tensorrt/tensorrt_logger.h +++ b/src/runtime/contrib/tensorrt/tensorrt_logger.h @@ -25,7 +25,7 @@ #ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_LOGGER_H_ #define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_LOGGER_H_ -#include +#include #include "NvInfer.h" #include "tensorrt_utils.h" diff --git a/src/runtime/contrib/tensorrt/tensorrt_ops.cc b/src/runtime/contrib/tensorrt/tensorrt_ops.cc index a1da6c39f68e..4c5eeea1e644 100644 --- a/src/runtime/contrib/tensorrt/tensorrt_ops.cc +++ b/src/runtime/contrib/tensorrt/tensorrt_ops.cc @@ -47,7 +47,7 @@ nvinfer1::ITensor* TensorRTOpConverter::Reshape(TensorRTOpConverterParams* param nvinfer1::ITensor* input, const std::vector& new_shape) const { auto layer = params->network->addShuffle(*input); - CHECK(layer != nullptr); + ICHECK(layer != nullptr); layer->setReshapeDimensions(VectorToTrtDims(new_shape)); return layer->getOutput(0); } @@ -56,17 +56,17 @@ nvinfer1::ITensor* TensorRTOpConverter::Transpose(TensorRTOpConverterParams* par nvinfer1::ITensor* input, const std::vector& order) const { auto layer = params->network->addShuffle(*input); - CHECK(layer != nullptr); + ICHECK(layer != nullptr); nvinfer1::Permutation perm; if (TRT_HAS_IMPLICIT_BATCH(params)) { // Batch dimension cannot be modified. 
- CHECK_EQ(input->getDimensions().nbDims, order.size() - 1); - CHECK_EQ(order[0], 0); + ICHECK_EQ(input->getDimensions().nbDims, order.size() - 1); + ICHECK_EQ(order[0], 0); for (size_t i = 0; i < order.size(); ++i) { perm.order[i] = order[i + 1] - 1; } } else { - CHECK_EQ(input->getDimensions().nbDims, order.size()); + ICHECK_EQ(input->getDimensions().nbDims, order.size()); for (size_t i = 0; i < order.size(); ++i) { perm.order[i] = order[i]; } @@ -81,11 +81,11 @@ int TensorRTOpConverter::ConvertAxis(TensorRTOpConverterParams* params, int axis if (TRT_HAS_IMPLICIT_BATCH(params)) { input_rank += 1; } - CHECK(axis >= -input_rank && axis < input_rank); + ICHECK(axis >= -input_rank && axis < input_rank); if (axis < 0) axis += input_rank; if (TRT_HAS_IMPLICIT_BATCH(params)) { // Can't modify batch dimenson. - CHECK_NE(axis, 0); + ICHECK_NE(axis, 0); // Subtract 1 for implicit batch dim. axis -= 1; } @@ -107,7 +107,7 @@ nvinfer1::ITensor* TensorRTOpConverter::CreateScalar( void TensorRTOpConverter::GetPadding(const std::vector& padding, bool* use_asymmetric_padding, nvinfer1::DimsHW* prepadding, nvinfer1::DimsHW* postpadding) const { - CHECK(padding.size() == 1 || padding.size() == 2 || padding.size() == 4); + ICHECK(padding.size() == 1 || padding.size() == 2 || padding.size() == 4); if (padding.size() == 4) { // four int : padding width in the order of (top, left, bottom, right). *prepadding = nvinfer1::DimsHW(std::stoi(padding[0]), std::stoi(padding[1])); @@ -129,7 +129,7 @@ void TensorRTOpConverter::GetPadding(const std::vector& padding, void TensorRTOpConverter::GetPadding3D(const std::vector& padding, bool* use_asymmetric_padding, nvinfer1::Dims* prepadding, nvinfer1::Dims* postpadding) const { - CHECK(padding.size() == 1 || padding.size() == 3 || padding.size() == 6); + ICHECK(padding.size() == 1 || padding.size() == 3 || padding.size() == 6); if (padding.size() == 6) { // six int : padding width in the order of (front, top, left, back, bottom, right) *prepadding = @@ -167,7 +167,7 @@ class ActivationOpConverter : public TensorRTOpConverter { #endif }; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported activation type " << params->op_name; + ICHECK(it != op_map.end()) << "Unsupported activation type " << params->op_name; nvinfer1::IActivationLayer* act_layer = params->network->addActivation(*params->inputs.at(0).tensor, it->second); #if TRT_VERSION_GE(5, 1, 5) @@ -181,7 +181,7 @@ class ActivationOpConverter : public TensorRTOpConverter { act_layer->setAlpha(alpha); } #endif - CHECK(act_layer != nullptr); + ICHECK(act_layer != nullptr); params->outputs.push_back(act_layer->getOutput(0)); } }; @@ -200,7 +200,7 @@ class ElementWiseBinaryOpConverter : public TensorRTOpConverter { {"maximum", nvinfer1::ElementWiseOperation::kMAX}, {"minimum", nvinfer1::ElementWiseOperation::kMIN}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported elementwise type " << params->op_name; + ICHECK(it != op_map.end()) << "Unsupported elementwise type " << params->op_name; // Broadcast auto input0 = params->inputs.at(0).tensor; auto input0_dims = TrtDimsToVector(input0->getDimensions()); @@ -221,7 +221,7 @@ class ElementWiseBinaryOpConverter : public TensorRTOpConverter { nvinfer1::IElementWiseLayer* elemwise_layer = params->network->addElementWise(*input0, *input1, it->second); - CHECK(elemwise_layer != nullptr); + ICHECK(elemwise_layer != nullptr); params->outputs.push_back(elemwise_layer->getOutput(0)); } }; @@ -234,10 +234,10 @@ class 
Conv2DOpConverter : public TensorRTOpConverter { auto input_tensor = params->inputs.at(0).tensor; auto input_dims = TrtDimsToVector(input_tensor->getDimensions()); auto weight_shape = params->inputs.at(1).weight_shape; - CHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCHW"); - CHECK(params->node.GetAttr>("out_layout")[0] == "" || - params->node.GetAttr>("out_layout")[0] == "NCHW"); - CHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIHW"); + ICHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCHW"); + ICHECK(params->node.GetAttr>("out_layout")[0] == "" || + params->node.GetAttr>("out_layout")[0] == "NCHW"); + ICHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIHW"); auto str_strides = params->node.GetAttr>("strides"); auto str_dilation = params->node.GetAttr>("dilation"); auto str_padding = params->node.GetAttr>("padding"); @@ -251,7 +251,7 @@ class Conv2DOpConverter : public TensorRTOpConverter { #if !TRT_VERSION_GE(5, 1, 5) if (use_asymmetric_padding) { auto pad_layer = params->network->addPadding(*input_tensor, prepadding, postpadding); - CHECK(pad_layer != nullptr); + ICHECK(pad_layer != nullptr); input_tensor = pad_layer->getOutput(0); // No need for conv op to do any padding. use_asymmetric_padding = false; @@ -263,7 +263,7 @@ class Conv2DOpConverter : public TensorRTOpConverter { nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; auto conv_layer = params->network->addConvolution(*input_tensor, channels, kernel_size, params->inputs.at(1).weight, bias); - CHECK(conv_layer != nullptr); + ICHECK(conv_layer != nullptr); if (use_asymmetric_padding) { #if TRT_VERSION_GE(5, 1, 5) conv_layer->setPrePadding(prepadding); @@ -272,10 +272,10 @@ class Conv2DOpConverter : public TensorRTOpConverter { } else { conv_layer->setPadding(prepadding); } - CHECK_EQ(str_strides.size(), 2); + ICHECK_EQ(str_strides.size(), 2); const auto strides = nvinfer1::DimsHW(std::stoi(str_strides[0]), std::stoi(str_strides[1])); conv_layer->setStride(strides); - CHECK_EQ(str_dilation.size(), 2); + ICHECK_EQ(str_dilation.size(), 2); const auto dilation = nvinfer1::DimsHW(std::stoi(str_dilation[0]), std::stoi(str_dilation[1])); conv_layer->setDilation(dilation); conv_layer->setNbGroups(groups); @@ -292,10 +292,10 @@ class Conv3DOpConverter : public TensorRTOpConverter { auto input_tensor = params->inputs.at(0).tensor; auto input_dims = TrtDimsToVector(input_tensor->getDimensions()); auto weight_shape = params->inputs.at(1).weight_shape; - CHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCDHW"); - CHECK(params->node.GetAttr>("out_layout")[0] == "" || - params->node.GetAttr>("out_layout")[0] == "NCDHW"); - CHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIDHW"); + ICHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCDHW"); + ICHECK(params->node.GetAttr>("out_layout")[0] == "" || + params->node.GetAttr>("out_layout")[0] == "NCDHW"); + ICHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIDHW"); auto str_strides = params->node.GetAttr>("strides"); auto str_dilation = params->node.GetAttr>("dilation"); auto str_padding = params->node.GetAttr>("padding"); @@ -311,18 +311,18 @@ class Conv3DOpConverter : public TensorRTOpConverter { nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; auto conv_layer = params->network->addConvolutionNd(*input_tensor, num_outputs, kernel_size, params->inputs.at(1).weight, bias); - CHECK(conv_layer != nullptr); + ICHECK(conv_layer != nullptr); if (use_asymmetric_padding) { conv_layer->setPrePadding(prepadding); 
conv_layer->setPostPadding(postpadding); } else { conv_layer->setPaddingNd(prepadding); } - CHECK_EQ(str_strides.size(), 3); + ICHECK_EQ(str_strides.size(), 3); const auto strides = nvinfer1::Dims3(std::stoi(str_strides[0]), std::stoi(str_strides[1]), std::stoi(str_strides[2])); conv_layer->setStrideNd(strides); - CHECK_EQ(str_dilation.size(), 3); + ICHECK_EQ(str_dilation.size(), 3); const auto dilation = nvinfer1::Dims3(std::stoi(str_dilation[0]), std::stoi(str_dilation[1]), std::stoi(str_dilation[2])); conv_layer->setDilationNd(dilation); @@ -339,7 +339,7 @@ class DenseOpConverter : public TensorRTOpConverter { void Convert(TensorRTOpConverterParams* params) const { auto input_tensor = params->inputs.at(0).tensor; auto input_dims = TrtDimsToVector(input_tensor->getDimensions()); - CHECK(input_dims.size() > 0 && input_dims.size() <= 3); + ICHECK(input_dims.size() > 0 && input_dims.size() <= 3); const size_t required_rank = TRT_HAS_IMPLICIT_BATCH(params) ? 3 : 4; const bool need_reshape_on_input = input_dims.size() != required_rank; if (need_reshape_on_input) { @@ -349,12 +349,12 @@ class DenseOpConverter : public TensorRTOpConverter { input_tensor = Reshape(params, input_tensor, new_shape); } // Weights are in KC format. - CHECK_EQ(params->inputs.at(1).weight_shape.size(), 2); + ICHECK_EQ(params->inputs.at(1).weight_shape.size(), 2); const int num_units = params->inputs.at(1).weight_shape[0]; nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; nvinfer1::IFullyConnectedLayer* fc_layer = params->network->addFullyConnected( *input_tensor, num_units, params->inputs.at(1).weight, bias); - CHECK(fc_layer != nullptr); + ICHECK(fc_layer != nullptr); auto output_tensor = fc_layer->getOutput(0); if (need_reshape_on_input) { // Remove added dims. 
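The reshape on the dense converter's input exists because addFullyConnected accepts only one rank (3 with an implicit batch dim, 4 without). A sketch of the shape arithmetic, assuming the size-1 dims are appended at the tail (PadToRequiredRank is an illustrative helper; the real converter builds the new shape inline):

    #include <vector>

    // Sketch only: pad a tensor shape with trailing size-1 dims until it
    // reaches the rank the FC layer expects; the converter strips the same
    // dims from the output afterwards.
    std::vector<int> PadToRequiredRank(std::vector<int> shape, size_t required_rank) {
      while (shape.size() < required_rank) {
        shape.push_back(1);
      }
      return shape;
    }
    // Example: with an implicit batch dim, required_rank is 3, so a rank-1
    // input {256} becomes {256, 1, 1} before the FC layer.
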
@@ -375,14 +375,14 @@ class BatchNormOpConverter : public TensorRTOpConverter { auto beta = params->inputs.at(2).weight; auto mean = params->inputs.at(3).weight; auto var = params->inputs.at(4).weight; - CHECK_EQ(gamma.count, beta.count); - CHECK_EQ(gamma.count, mean.count); - CHECK_EQ(gamma.count, var.count); + ICHECK_EQ(gamma.count, beta.count); + ICHECK_EQ(gamma.count, mean.count); + ICHECK_EQ(gamma.count, var.count); const float epsilon = std::stof(params->node.GetAttr>("epsilon")[0]); const int axis = std::stoi(params->node.GetAttr>("axis")[0]); const bool scale = std::stoi(params->node.GetAttr>("scale")[0]); const bool center = std::stoi(params->node.GetAttr>("center")[0]); - CHECK(axis == 1 || axis == 3); + ICHECK(axis == 1 || axis == 3); const bool need_transpose = axis == 3; void* weight_scale_ptr = new float[gamma.count]; @@ -415,7 +415,7 @@ class BatchNormOpConverter : public TensorRTOpConverter { } nvinfer1::IScaleLayer* scale_layer = params->network->addScale( *input, nvinfer1::ScaleMode::kCHANNEL, weight_shift, weight_scale, power); - CHECK(scale_layer != nullptr); + ICHECK(scale_layer != nullptr); auto output = scale_layer->getOutput(0); if (need_transpose) { output = Transpose(params, output, {0, 2, 3, 1}); @@ -448,7 +448,7 @@ class SoftmaxOpConverter : public TensorRTOpConverter { const int axis = ConvertAxis(params, original_axis, input_rank); nvinfer1::ISoftMaxLayer* softmax_layer = params->network->addSoftMax(*input); softmax_layer->setAxes(1 << axis); - CHECK(softmax_layer != nullptr); + ICHECK(softmax_layer != nullptr); params->outputs.push_back(softmax_layer->getOutput(0)); } }; @@ -463,8 +463,8 @@ class PoolingOpConverter : public TensorRTOpConverter { {"nn.max_pool2d", nvinfer1::PoolingType::kMAX}, {"nn.avg_pool2d", nvinfer1::PoolingType::kAVERAGE}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; - CHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); + ICHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; + ICHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); auto str_pool_size = params->node.GetAttr>("pool_size"); auto str_padding = params->node.GetAttr>("padding"); auto str_strides = params->node.GetAttr>("strides"); @@ -478,7 +478,7 @@ class PoolingOpConverter : public TensorRTOpConverter { #if !TRT_VERSION_GE(5, 1, 5) if (use_asymmetric_padding) { auto pad_layer = params->network->addPadding(*input, prepadding, postpadding); - CHECK(pad_layer != nullptr); + ICHECK(pad_layer != nullptr); input = pad_layer->getOutput(0); // No need for pooling op to do any padding. 
use_asymmetric_padding = false; @@ -489,7 +489,7 @@ class PoolingOpConverter : public TensorRTOpConverter { nvinfer1::DimsHW window_size = nvinfer1::DimsHW(std::stoi(str_pool_size[0]), std::stoi(str_pool_size[1])); auto pool_layer = params->network->addPooling(*input, it->second, window_size); - CHECK(pool_layer != nullptr); + ICHECK(pool_layer != nullptr); nvinfer1::DimsHW strides = nvinfer1::DimsHW(std::stoi(str_strides[0]), std::stoi(str_strides[1])); pool_layer->setStride(strides); @@ -519,7 +519,7 @@ class PoolingOpConverter : public TensorRTOpConverter { pool_layer->setPaddingMode(nvinfer1::PaddingMode::kEXPLICIT_ROUND_UP); } #else - CHECK(!ceil_mode); + ICHECK(!ceil_mode); #endif params->outputs.push_back(pool_layer->getOutput(0)); } @@ -536,8 +536,8 @@ class Pooling3DOpConverter : public TensorRTOpConverter { {"nn.max_pool3d", nvinfer1::PoolingType::kMAX}, {"nn.avg_pool3d", nvinfer1::PoolingType::kAVERAGE}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; - CHECK_EQ(params->node.GetAttr>("layout")[0], "NCDHW"); + ICHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; + ICHECK_EQ(params->node.GetAttr>("layout")[0], "NCDHW"); auto str_pool_size = params->node.GetAttr>("pool_size"); auto str_padding = params->node.GetAttr>("padding"); auto str_strides = params->node.GetAttr>("strides"); @@ -548,7 +548,7 @@ class Pooling3DOpConverter : public TensorRTOpConverter { nvinfer1::Dims window_size = nvinfer1::Dims3( std::stoi(str_pool_size[0]), std::stoi(str_pool_size[1]), std::stoi(str_pool_size[2])); auto pool_layer = params->network->addPoolingNd(*input, it->second, window_size); - CHECK(pool_layer != nullptr); + ICHECK(pool_layer != nullptr); nvinfer1::Dims strides = nvinfer1::Dims3(std::stoi(str_strides[0]), std::stoi(str_strides[1]), std::stoi(str_strides[2])); pool_layer->setStrideNd(strides); @@ -582,13 +582,13 @@ class GlobalPoolingOpConverter : public TensorRTOpConverter { {"nn.global_max_pool2d", nvinfer1::PoolingType::kMAX}, {"nn.global_avg_pool2d", nvinfer1::PoolingType::kAVERAGE}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; - CHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); + ICHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; + ICHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); const int h = TRT_HAS_IMPLICIT_BATCH(params) ? input_dims[1] : input_dims[2]; const int w = TRT_HAS_IMPLICIT_BATCH(params) ? 
input_dims[2] : input_dims[3]; auto pool_layer = params->network->addPooling(*input_tensor, it->second, nvinfer1::DimsHW(h, w)); - CHECK(pool_layer != nullptr); + ICHECK(pool_layer != nullptr); params->outputs.push_back(pool_layer->getOutput(0)); } }; @@ -650,10 +650,10 @@ class UnaryOpConverter : public TensorRTOpConverter { #endif }; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported unary type " << params->op_name; + ICHECK(it != op_map.end()) << "Unsupported unary type " << params->op_name; nvinfer1::IUnaryLayer* unary_layer = params->network->addUnary(*params->inputs.at(0).tensor, it->second); - CHECK(unary_layer != nullptr); + ICHECK(unary_layer != nullptr); params->outputs.push_back(unary_layer->getOutput(0)); } }; @@ -664,12 +664,12 @@ class ConcatOpConverter : public TensorRTOpConverter { void Convert(TensorRTOpConverterParams* params) const { const int num_inputs = params->inputs.size(); - CHECK_GT(num_inputs, 0); + ICHECK_GT(num_inputs, 0); const int input_rank = params->inputs[0].tensor->getDimensions().nbDims; std::vector input_tensors; for (auto input : params->inputs) { - CHECK(input.type == kTensor); - CHECK_EQ(input_rank, input.tensor->getDimensions().nbDims); + ICHECK(input.type == kTensor); + ICHECK_EQ(input_rank, input.tensor->getDimensions().nbDims); input_tensors.push_back(input.tensor); } @@ -678,7 +678,7 @@ class ConcatOpConverter : public TensorRTOpConverter { nvinfer1::IConcatenationLayer* concat_layer = params->network->addConcatenation(input_tensors.data(), input_tensors.size()); - CHECK(concat_layer != nullptr); + ICHECK(concat_layer != nullptr); concat_layer->setAxis(axis); params->outputs.push_back(concat_layer->getOutput(0)); } @@ -692,7 +692,7 @@ class BiasAddOpConverter : public TensorRTOpConverter { auto input_tensor = params->inputs.at(0).tensor; auto input_dims = TrtDimsToVector(input_tensor->getDimensions()); const size_t required_rank = TRT_HAS_IMPLICIT_BATCH(params) ? 3 : 4; - CHECK(input_dims.size() > 0 && input_dims.size() <= required_rank); + ICHECK(input_dims.size() > 0 && input_dims.size() <= required_rank); const bool need_reshape_on_input = input_dims.size() != required_rank; if (need_reshape_on_input) { // Add dims of size 1 until rank is required_rank. @@ -705,7 +705,7 @@ class BiasAddOpConverter : public TensorRTOpConverter { nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, 0}; nvinfer1::IScaleLayer* scale_layer = params->network->addScale( *input_tensor, nvinfer1::ScaleMode::kCHANNEL, params->inputs.at(1).weight, shift, power); - CHECK(scale_layer != nullptr); + ICHECK(scale_layer != nullptr); auto output_tensor = scale_layer->getOutput(0); if (need_reshape_on_input) { // Remove added dims. 
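The softmax and concat converters above both funnel user-facing axes through ConvertAxis, whose contract is enforced entirely with ICHECKs. A simplified restatement (NormalizeAxis is an illustrative name; the logic mirrors the ConvertAxis hunk earlier in this file):

    #include <tvm/support/logging.h>  // assumed home of ICHECK at this commit

    int NormalizeAxis(int axis, int input_rank, bool implicit_batch) {
      // User-facing ranks count the batch dim even when TRT keeps it implicit.
      if (implicit_batch) input_rank += 1;
      ICHECK(axis >= -input_rank && axis < input_rank);
      if (axis < 0) axis += input_rank;
      if (implicit_batch) {
        ICHECK_NE(axis, 0);  // the implicit batch dim cannot be addressed
        axis -= 1;           // TRT numbers the remaining axes without it
      }
      return axis;
    }
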
@@ -722,12 +722,12 @@ class Conv2DTransposeOpConverter : public TensorRTOpConverter { void Convert(TensorRTOpConverterParams* params) const { auto input_tensor = params->inputs.at(0).tensor; auto weight_shape = params->inputs.at(1).weight_shape; - CHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCHW"); - CHECK(params->node.GetAttr>("out_layout")[0] == "" || - params->node.GetAttr>("out_layout")[0] == "NCHW"); - CHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIHW"); + ICHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCHW"); + ICHECK(params->node.GetAttr>("out_layout")[0] == "" || + params->node.GetAttr>("out_layout")[0] == "NCHW"); + ICHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIHW"); auto str_dilation = params->node.GetAttr>("dilation"); - CHECK(std::stoi(str_dilation[0]) == 1 && std::stoi(str_dilation[1]) == 1); + ICHECK(std::stoi(str_dilation[0]) == 1 && std::stoi(str_dilation[1]) == 1); auto str_strides = params->node.GetAttr>("strides"); auto str_padding = params->node.GetAttr>("padding"); auto str_output_padding = params->node.GetAttr>("output_padding"); @@ -741,7 +741,7 @@ class Conv2DTransposeOpConverter : public TensorRTOpConverter { #if !TRT_VERSION_GE(5, 1, 5) if (use_asymmetric_padding) { auto pad_layer = params->network->addPadding(*input_tensor, prepadding, postpadding); - CHECK(pad_layer != nullptr); + ICHECK(pad_layer != nullptr); input_tensor = pad_layer->getOutput(0); // No need for conv op to do any padding. use_asymmetric_padding = false; @@ -755,7 +755,7 @@ class Conv2DTransposeOpConverter : public TensorRTOpConverter { nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; auto deconv_layer = params->network->addDeconvolution(*input_tensor, num_outputs, kernel_size, params->inputs.at(1).weight, bias); - CHECK(deconv_layer != nullptr); + ICHECK(deconv_layer != nullptr); if (use_asymmetric_padding) { #if TRT_VERSION_GE(5, 1, 5) deconv_layer->setPrePadding(prepadding); @@ -791,14 +791,14 @@ class Conv3DTransposeOpConverter : public TensorRTOpConverter { void Convert(TensorRTOpConverterParams* params) const { auto input_tensor = params->inputs.at(0).tensor; auto weight_shape = params->inputs.at(1).weight_shape; - CHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCDHW"); - CHECK(params->node.GetAttr>("out_layout")[0] == "" || - params->node.GetAttr>("out_layout")[0] == "NCDHW"); - CHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIDHW"); + ICHECK_EQ(params->node.GetAttr>("data_layout")[0], "NCDHW"); + ICHECK(params->node.GetAttr>("out_layout")[0] == "" || + params->node.GetAttr>("out_layout")[0] == "NCDHW"); + ICHECK_EQ(params->node.GetAttr>("kernel_layout")[0], "OIDHW"); auto str_dilation = params->node.GetAttr>("dilation"); - CHECK_EQ(str_dilation.size(), 3); - CHECK(std::stoi(str_dilation[0]) == 1 && std::stoi(str_dilation[1]) == 1 && - std::stoi(str_dilation[2]) == 1); + ICHECK_EQ(str_dilation.size(), 3); + ICHECK(std::stoi(str_dilation[0]) == 1 && std::stoi(str_dilation[1]) == 1 && + std::stoi(str_dilation[2]) == 1); auto str_strides = params->node.GetAttr>("strides"); auto str_padding = params->node.GetAttr>("padding"); auto str_output_padding = params->node.GetAttr>("output_padding"); @@ -813,14 +813,14 @@ class Conv3DTransposeOpConverter : public TensorRTOpConverter { nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; auto deconv_layer = params->network->addDeconvolutionNd(*input_tensor, num_outputs, kernel_size, params->inputs.at(1).weight, bias); - CHECK(deconv_layer != nullptr); + ICHECK(deconv_layer != nullptr); 
if (use_asymmetric_padding) { deconv_layer->setPrePadding(prepadding); deconv_layer->setPostPadding(postpadding); } else { deconv_layer->setPaddingNd(prepadding); } - CHECK_EQ(str_strides.size(), 3); + ICHECK_EQ(str_strides.size(), 3); const auto strides = nvinfer1::Dims3(std::stoi(str_strides[0]), std::stoi(str_strides[1]), std::stoi(str_strides[2])); deconv_layer->setStrideNd(strides); @@ -830,7 +830,7 @@ class Conv3DTransposeOpConverter : public TensorRTOpConverter { if (str_output_padding.size()) { GetPadding3D(str_output_padding, &use_asymmetric_padding, &prepadding, &postpadding); // Are any post-padding values non-zero? - CHECK(!std::any_of(postpadding.d, postpadding.d + postpadding.nbDims, [](int x) { + ICHECK(!std::any_of(postpadding.d, postpadding.d + postpadding.nbDims, [](int x) { return x != 0; })) << "TRT does not support padding on 3 dimensions."; } @@ -882,13 +882,13 @@ class ReshapeOpConverter : public TensorRTOpConverter { void Convert(TensorRTOpConverterParams* params) const { auto input = params->inputs.at(0).tensor; - CHECK_EQ(std::stoi(params->node.GetAttr>("reverse")[0]), false); + ICHECK_EQ(std::stoi(params->node.GetAttr>("reverse")[0]), false); auto str_newshape = params->node.GetAttr>("newshape"); std::vector new_shape; const int start_index = TRT_HAS_IMPLICIT_BATCH(params) ? 1 : 0; for (size_t i = start_index; i < str_newshape.size(); ++i) { const int value = std::stoi(str_newshape[i]); - CHECK_GE(value, -1); + ICHECK_GE(value, -1); new_shape.push_back(value); } params->outputs.push_back(Reshape(params, input, new_shape)); @@ -923,14 +923,14 @@ class ReduceOpConverter : public TensorRTOpConverter { {"min", nvinfer1::ReduceOperation::kMIN}, {"mean", nvinfer1::ReduceOperation::kAVG}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported reduce type " << params->op_name; + ICHECK(it != op_map.end()) << "Unsupported reduce type " << params->op_name; auto input = params->inputs.at(0).tensor; - CHECK_EQ(std::stoi(params->node.GetAttr>("exclude")[0]), false); + ICHECK_EQ(std::stoi(params->node.GetAttr>("exclude")[0]), false); bool keepdims = std::stoi(params->node.GetAttr>("keepdims")[0]); auto str_axis = params->node.GetAttr>("axis"); // TODO(trevmorr): Support reduce to scalar. - CHECK_GT(str_axis.size(), 0); + ICHECK_GT(str_axis.size(), 0); uint32_t reduce_axes = 0; for (size_t i = 0; i < str_axis.size(); ++i) { const int axis = ConvertAxis(params, std::stoi(str_axis[i]), input->getDimensions().nbDims); @@ -982,8 +982,8 @@ class AdaptivePoolingOpConverter : public TensorRTOpConverter { {"nn.adaptive_max_pool2d", nvinfer1::PoolingType::kMAX}, {"nn.adaptive_avg_pool2d", nvinfer1::PoolingType::kAVERAGE}}; auto it = op_map.find(params->op_name); - CHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; - CHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); + ICHECK(it != op_map.end()) << "Unsupported pooling type " << params->op_name << " in TensorRT"; + ICHECK_EQ(params->node.GetAttr>("layout")[0], "NCHW"); // This is an approximation of adaptive pooling. Results will not be // mathematically exact except when output_size is (1, 1). 
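The comment just above is the whole story of the adaptive-pooling lowering, and it reduces to two integer formulas: the stride tiles the input and the window absorbs the remainder, so the result matches true adaptive pooling only when out_size divides in_size, trivially so for (1, 1). A sketch (AdaptivePoolParams is a hypothetical helper, and the floor-division stride is an assumption consistent with the window formula in the next hunk):

    #include <utility>

    // Returns {stride, window} for one spatial dim of the approximation.
    std::pair<int, int> AdaptivePoolParams(int in_size, int out_size) {
      int stride = in_size / out_size;                 // floor division (assumed)
      int window = in_size - (out_size - 1) * stride;  // last window hits the edge
      return {stride, window};
    }
    // Example: in_size = 5, out_size = 2 gives stride 2, window 3,
    // i.e. overlapping windows [0, 3) and [2, 5).
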
@@ -995,7 +995,7 @@ class AdaptivePoolingOpConverter : public TensorRTOpConverter { const auto window_size = nvinfer1::DimsHW(h - (output_size.h() - 1) * stride.h(), w - (output_size.w() - 1) * stride.w()); auto pool_layer = params->network->addPooling(*input_tensor, it->second, window_size); - CHECK(pool_layer != nullptr); + ICHECK(pool_layer != nullptr); pool_layer->setStride(stride); params->outputs.push_back(pool_layer->getOutput(0)); } diff --git a/src/runtime/contrib/tensorrt/tensorrt_runtime.cc b/src/runtime/contrib/tensorrt/tensorrt_runtime.cc index 72c025695f7d..f183e2f24449 100644 --- a/src/runtime/contrib/tensorrt/tensorrt_runtime.cc +++ b/src/runtime/contrib/tensorrt/tensorrt_runtime.cc @@ -73,7 +73,7 @@ class TensorRTRuntime : public JSONRuntimeBase { * \param consts The constant params from compiled model. */ void Init(const Array& consts) override { - CHECK_EQ(consts.size(), const_idx_.size()) + ICHECK_EQ(consts.size(), const_idx_.size()) << "The number of input constants must match the number of required."; LoadGlobalAttributes(); if (GetCachedEnginesFromDisk()) return; @@ -118,7 +118,7 @@ class TensorRTRuntime : public JSONRuntimeBase { uint32_t eid = EntryID(nid, j); const std::string name = nodes_[nid].GetOpName() + "_" + std::to_string(j); int binding_index = engine->getBindingIndex(name.c_str()); - CHECK_NE(binding_index, -1); + ICHECK_NE(binding_index, -1); bindings[binding_index] = data_entry_[eid]->data; } } @@ -128,18 +128,18 @@ class TensorRTRuntime : public JSONRuntimeBase { uint32_t eid = EntryID(outputs_[i]); const std::string& name = engine_and_context.outputs[i]; int binding_index = engine->getBindingIndex(name.c_str()); - CHECK_NE(binding_index, -1); + ICHECK_NE(binding_index, -1); bindings[binding_index] = data_entry_[eid]->data; } #if TRT_VERSION_GE(6, 0, 1) if (use_implicit_batch_) { - CHECK(context->execute(batch_size_, bindings.data())) << "Running TensorRT failed."; + ICHECK(context->execute(batch_size_, bindings.data())) << "Running TensorRT failed."; } else { - CHECK(context->executeV2(bindings.data())) << "Running TensorRT failed."; + ICHECK(context->executeV2(bindings.data())) << "Running TensorRT failed."; } #else - CHECK(context->execute(batch_size_, bindings.data())) << "Running TensorRT failed."; + ICHECK(context->execute(batch_size_, bindings.data())) << "Running TensorRT failed."; #endif } @@ -162,7 +162,7 @@ class TensorRTRuntime : public JSONRuntimeBase { if (node.GetOpType() == "input") { builder.AddInput(nid, node); } else { - CHECK_EQ(node.GetOpType(), "const"); + ICHECK_EQ(node.GetOpType(), "const"); uint32_t eid = EntryID(nid, 0); builder.AddConstant(nid, data_entry_[eid]); } diff --git a/src/runtime/contrib/tflite/tflite_runtime.cc b/src/runtime/contrib/tflite/tflite_runtime.cc index 8b34e90312b0..f56e62ec1a40 100644 --- a/src/runtime/contrib/tflite/tflite_runtime.cc +++ b/src/runtime/contrib/tflite/tflite_runtime.cc @@ -117,7 +117,7 @@ void TFLiteRuntime::SetInput(int index, DLTensor* data_in) { TVM_DTYPE_DISPATCH(dtype, DType, { DType* dest = interpreter_->typed_input_tensor(index); DType* src = static_cast(data_in->data); - CHECK(data_in->strides == NULL); + ICHECK(data_in->strides == NULL); int64_t size = 1; for (int64_t i = 0; i < data_in->ndim; ++i) { size *= data_in->shape[i]; @@ -155,7 +155,7 @@ PackedFunc TFLiteRuntime::GetFunction(const std::string& name, if (name == "set_input") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { int in_idx = args[0]; - CHECK_GE(in_idx, 0); + ICHECK_GE(in_idx, 0); 
this->SetInput(in_idx, args[1]); }); } else if (name == "get_output") { diff --git a/src/runtime/contrib/tflite/tflite_runtime.h b/src/runtime/contrib/tflite/tflite_runtime.h index f3e3bd90bba4..ff0e6ab0db56 100644 --- a/src/runtime/contrib/tflite/tflite_runtime.h +++ b/src/runtime/contrib/tflite/tflite_runtime.h @@ -37,7 +37,7 @@ namespace tvm { namespace runtime { -#define CHECK_TFLITE_STATUS(ret) CHECK_EQ(ret, kTfLiteOk) +#define CHECK_TFLITE_STATUS(ret) ICHECK_EQ(ret, kTfLiteOk) /*! * \brief Tflite runtime. diff --git a/src/runtime/contrib/thrust/thrust.cu b/src/runtime/contrib/thrust/thrust.cu index c40235d7cc9e..2054db710b6d 100644 --- a/src/runtime/contrib/thrust/thrust.cu +++ b/src/runtime/contrib/thrust/thrust.cu @@ -130,7 +130,7 @@ void thrust_sort_common(DLTensor* input, TVM_REGISTER_GLOBAL("tvm.contrib.thrust.sort_nms") .set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_GE(args.num_args, 5); + ICHECK_GE(args.num_args, 5); DLTensor* input = args[0]; DLTensor* valid_count = args[1]; DLTensor* values_out = args[2]; @@ -149,7 +149,7 @@ TVM_REGISTER_GLOBAL("tvm.contrib.thrust.sort_nms") TVM_REGISTER_GLOBAL("tvm.contrib.thrust.sort") .set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_GE(args.num_args, 4); + ICHECK_GE(args.num_args, 4); DLTensor* input = args[0]; DLTensor* values_out = args[1]; DLTensor* indices_out = args[2]; diff --git a/src/runtime/cpu_device_api.cc b/src/runtime/cpu_device_api.cc index 5474b758ca9c..146bfa804785 100644 --- a/src/runtime/cpu_device_api.cc +++ b/src/runtime/cpu_device_api.cc @@ -20,10 +20,10 @@ /*! * \file cpu_device_api.cc */ -#include #include #include #include +#include #include #include diff --git a/src/runtime/cuda/cuda_common.h b/src/runtime/cuda/cuda_common.h index 25ff28a91a6c..471fefb230a1 100644 --- a/src/runtime/cuda/cuda_common.h +++ b/src/runtime/cuda/cuda_common.h @@ -44,10 +44,11 @@ namespace runtime { } \ } -#define CUDA_CALL(func) \ - { \ - cudaError_t e = (func); \ - CHECK(e == cudaSuccess || e == cudaErrorCudartUnloading) << "CUDA: " << cudaGetErrorString(e); \ +#define CUDA_CALL(func) \ + { \ + cudaError_t e = (func); \ + ICHECK(e == cudaSuccess || e == cudaErrorCudartUnloading) \ + << "CUDA: " << cudaGetErrorString(e); \ } /*! 
\brief Thread local workspace */ diff --git a/src/runtime/cuda/cuda_device_api.cc b/src/runtime/cuda/cuda_device_api.cc index f7b88ccdd964..30abfc8dc559 100644 --- a/src/runtime/cuda/cuda_device_api.cc +++ b/src/runtime/cuda/cuda_device_api.cc @@ -107,7 +107,7 @@ class CUDADeviceAPI final : public DeviceAPI { } void* AllocDataSpace(TVMContext ctx, size_t nbytes, size_t alignment, DLDataType type_hint) final { - CHECK_EQ(256 % alignment, 0U) << "CUDA space is aligned at 256 bytes"; + ICHECK_EQ(256 % alignment, 0U) << "CUDA space is aligned at 256 bytes"; void* ret; if (ctx.device_type == kDLCPUPinned) { CUDA_CALL(cudaMallocHost(&ret, nbytes)); diff --git a/src/runtime/cuda/cuda_module.cc b/src/runtime/cuda/cuda_module.cc index c356897c8e90..a877bc634300 100644 --- a/src/runtime/cuda/cuda_module.cc +++ b/src/runtime/cuda/cuda_module.cc @@ -71,11 +71,11 @@ class CUDAModuleNode : public runtime::ModuleNode { std::string fmt = GetFileFormat(file_name, format); std::string meta_file = GetMetaFilePath(file_name); if (fmt == "cu") { - CHECK_NE(cuda_source_.length(), 0); + ICHECK_NE(cuda_source_.length(), 0); SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, cuda_source_); } else { - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, data_); } @@ -124,7 +124,7 @@ class CUDAModuleNode : public runtime::ModuleNode { size_t nbytes; CUresult result = cuModuleGetGlobal(&global, &nbytes, module_[device_id], global_name.c_str()); - CHECK_EQ(nbytes, expect_nbytes); + ICHECK_EQ(nbytes, expect_nbytes); if (result != CUDA_SUCCESS) { const char* msg; cuGetErrorName(result, &msg); @@ -232,8 +232,8 @@ class CUDAPrepGlobalBarrier { PackedFunc CUDAModuleNode::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { - CHECK_EQ(sptr_to_self.get(), this); - CHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; + ICHECK_EQ(sptr_to_self.get(), this); + ICHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; if (name == symbol::tvm_prepare_global_barrier) { return PackedFunc(CUDAPrepGlobalBarrier(this, sptr_to_self)); } diff --git a/src/runtime/dso_library.cc b/src/runtime/dso_library.cc index 6d3eec402306..c439bde82497 100644 --- a/src/runtime/dso_library.cc +++ b/src/runtime/dso_library.cc @@ -63,7 +63,7 @@ class DSOLibrary final : public Library { // use wstring version that is needed by LLVM. 
std::wstring wname(name.begin(), name.end()); lib_handle_ = LoadLibraryW(wname.c_str()); - CHECK(lib_handle_ != nullptr) << "Failed to load dynamic shared library " << name; + ICHECK(lib_handle_ != nullptr) << "Failed to load dynamic shared library " << name; } void Unload() { @@ -76,8 +76,8 @@ class DSOLibrary final : public Library { // load the library void Load(const std::string& name) { lib_handle_ = dlopen(name.c_str(), RTLD_LAZY | RTLD_LOCAL); - CHECK(lib_handle_ != nullptr) << "Failed to load dynamic shared library " << name << " " - << dlerror(); + ICHECK(lib_handle_ != nullptr) + << "Failed to load dynamic shared library " << name << " " << dlerror(); } void* GetSymbol_(const char* name) { return dlsym(lib_handle_, name); } diff --git a/src/runtime/file_utils.cc b/src/runtime/file_utils.cc index c3298d266cdd..42cbfdc3b1ed 100644 --- a/src/runtime/file_utils.cc +++ b/src/runtime/file_utils.cc @@ -23,8 +23,8 @@ #include "file_utils.h" #include -#include #include +#include #include #include @@ -114,7 +114,7 @@ std::string GetMetaFilePath(const std::string& file_name) { void LoadBinaryFromFile(const std::string& file_name, std::string* data) { std::ifstream fs(file_name, std::ios::in | std::ios::binary); - CHECK(!fs.fail()) << "Cannot open " << file_name; + ICHECK(!fs.fail()) << "Cannot open " << file_name; // get its size: fs.seekg(0, std::ios::end); size_t size = static_cast(fs.tellg()); @@ -125,7 +125,7 @@ void LoadBinaryFromFile(const std::string& file_name, std::string* data) { void SaveBinaryToFile(const std::string& file_name, const std::string& data) { std::ofstream fs(file_name, std::ios::out | std::ios::binary); - CHECK(!fs.fail()) << "Cannot open " << file_name; + ICHECK(!fs.fail()) << "Cannot open " << file_name; fs.write(&data[0], data.length()); } @@ -133,7 +133,7 @@ void SaveMetaDataToFile(const std::string& file_name, const std::unordered_map& fmap) { std::string version = "0.1.0"; std::ofstream fs(file_name.c_str()); - CHECK(!fs.fail()) << "Cannot open file " << file_name; + ICHECK(!fs.fail()) << "Cannot open file " << file_name; dmlc::JSONWriter writer(&fs); writer.BeginObject(); writer.WriteObjectKeyValue("tvm_version", version); @@ -145,7 +145,7 @@ void SaveMetaDataToFile(const std::string& file_name, void LoadMetaDataFromFile(const std::string& file_name, std::unordered_map* fmap) { std::ifstream fs(file_name.c_str()); - CHECK(!fs.fail()) << "Cannot open file " << file_name; + ICHECK(!fs.fail()) << "Cannot open file " << file_name; std::string version; dmlc::JSONReader reader(&fs); dmlc::JSONObjectReadHelper helper; diff --git a/src/runtime/graph/debug/graph_runtime_debug.cc b/src/runtime/graph/debug/graph_runtime_debug.cc index 5439be9109f9..3e9ff4f279e7 100644 --- a/src/runtime/graph/debug/graph_runtime_debug.cc +++ b/src/runtime/graph/debug/graph_runtime_debug.cc @@ -148,7 +148,7 @@ class GraphRuntimeDebug : public GraphRuntime { * \param data_out the node data. 
*/ void DebugGetNodeOutput(int index, DLTensor* data_out) { - CHECK_LT(static_cast(index), op_execs_.size()); + ICHECK_LT(static_cast(index), op_execs_.size()); uint32_t eid = index; for (size_t i = 0; i < op_execs_.size(); ++i) { @@ -185,9 +185,9 @@ PackedFunc GraphRuntimeDebug::GetFunction(const std::string& name, int number = args[0]; int repeat = args[1]; int min_repeat_ms = args[2]; - CHECK_GT(number, 0); - CHECK_GT(repeat, 0); - CHECK_GE(min_repeat_ms, 0); + ICHECK_GT(number, 0); + ICHECK_GT(repeat, 0); + ICHECK_GE(min_repeat_ms, 0); *rv = this->RunIndividual(number, repeat, min_repeat_ms); }); } else { @@ -209,9 +209,9 @@ Module GraphRuntimeDebugCreate(const std::string& sym_json, const tvm::runtime:: } TVM_REGISTER_GLOBAL("tvm.graph_runtime_debug.create").set_body([](TVMArgs args, TVMRetValue* rv) { - CHECK_GE(args.num_args, 4) << "The expected number of arguments for graph_runtime.create is " - "at least 4, but it has " - << args.num_args; + ICHECK_GE(args.num_args, 4) << "The expected number of arguments for graph_runtime.create is " + "at least 4, but it has " + << args.num_args; *rv = GraphRuntimeDebugCreate(args[0], args[1], GetAllContext(args)); }); } // namespace runtime diff --git a/src/runtime/graph/graph_runtime.cc b/src/runtime/graph/graph_runtime.cc index 18245badcc74..2e1403558d45 100644 --- a/src/runtime/graph/graph_runtime.cc +++ b/src/runtime/graph/graph_runtime.cc @@ -99,7 +99,7 @@ int GraphRuntime::GetInputIndex(const std::string& name) { * \param data_in The input data. */ void GraphRuntime::SetInput(int index, DLTensor* data_in) { - CHECK_LT(static_cast(index), input_nodes_.size()); + ICHECK_LT(static_cast(index), input_nodes_.size()); uint32_t eid = this->entry_id(input_nodes_[index], 0); data_entry_[eid].CopyFrom(data_in); } @@ -109,18 +109,18 @@ void GraphRuntime::SetInput(int index, DLTensor* data_in) { * \param data_ref The input data that is referred. */ void GraphRuntime::SetInputZeroCopy(int index, DLTensor* data_ref) { - CHECK_LT(static_cast(index), input_nodes_.size()); + ICHECK_LT(static_cast(index), input_nodes_.size()); uint32_t eid = this->entry_id(input_nodes_[index], 0); const DLTensor* old_t = data_entry_[eid].operator->(); // check the consistency of input - CHECK_EQ(data_alignment_[eid], details::GetDataAlignment(*data_ref)); - CHECK_EQ(reinterpret_cast(data_ref->data) % kAllocAlignment, 0); - CHECK_EQ(old_t->ndim, static_cast(data_ref->ndim)); - CHECK_EQ(old_t->ctx.device_type, data_ref->ctx.device_type); - CHECK_EQ(old_t->ctx.device_id, data_ref->ctx.device_id); + ICHECK_EQ(data_alignment_[eid], details::GetDataAlignment(*data_ref)); + ICHECK_EQ(reinterpret_cast(data_ref->data) % kAllocAlignment, 0); + ICHECK_EQ(old_t->ndim, static_cast(data_ref->ndim)); + ICHECK_EQ(old_t->ctx.device_type, data_ref->ctx.device_type); + ICHECK_EQ(old_t->ctx.device_id, data_ref->ctx.device_id); for (auto i = 0; i < data_ref->ndim; ++i) { - CHECK_EQ(old_t->shape[i], data_ref->shape[i]); + ICHECK_EQ(old_t->shape[i], data_ref->shape[i]); } // Update the data pointer for each argument of each op @@ -147,7 +147,7 @@ int GraphRuntime::NumInputs() const { return input_nodes_.size(); } * \return NDArray corresponding to given input node index. 
*/ NDArray GraphRuntime::GetInput(int index) const { - CHECK_LT(static_cast(index), input_nodes_.size()); + ICHECK_LT(static_cast(index), input_nodes_.size()); uint32_t eid = this->entry_id(input_nodes_[index], 0); return data_entry_[eid]; } @@ -158,7 +158,7 @@ NDArray GraphRuntime::GetInput(int index) const { * \return NDArray corresponding to given output node index. */ NDArray GraphRuntime::GetOutput(int index) const { - CHECK_LT(static_cast(index), outputs_.size()); + ICHECK_LT(static_cast(index), outputs_.size()); uint32_t eid = this->entry_id(outputs_[index]); return data_entry_[eid]; } @@ -168,14 +168,14 @@ NDArray GraphRuntime::GetOutput(int index) const { * \param data_out the output data. */ void GraphRuntime::CopyOutputTo(int index, DLTensor* data_out) { - CHECK_LT(static_cast(index), outputs_.size()); + ICHECK_LT(static_cast(index), outputs_.size()); uint32_t eid = this->entry_id(outputs_[index]); // Check the shapes to avoid receiving data of a different dimension but the same size. const NDArray& data = data_entry_[eid]; - CHECK_EQ(data->ndim, data_out->ndim); + ICHECK_EQ(data->ndim, data_out->ndim); for (int32_t j = 0; j < data->ndim; ++j) { - CHECK_EQ(data->shape[j], data_out->shape[j]); + ICHECK_EQ(data->shape[j], data_out->shape[j]); } data_entry_[eid].CopyTo(data_out); @@ -192,16 +192,16 @@ void GraphRuntime::LoadParams(const std::string& param_blob) { void GraphRuntime::LoadParams(dmlc::Stream* strm) { uint64_t header, reserved; - CHECK(strm->Read(&header)) << "Invalid parameters file format"; - CHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; - CHECK(strm->Read(&reserved)) << "Invalid parameters file format"; + ICHECK(strm->Read(&header)) << "Invalid parameters file format"; + ICHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; + ICHECK(strm->Read(&reserved)) << "Invalid parameters file format"; std::vector names; - CHECK(strm->Read(&names)) << "Invalid parameters file format"; + ICHECK(strm->Read(&names)) << "Invalid parameters file format"; uint64_t sz; strm->Read(&sz); size_t size = static_cast(sz); - CHECK(size == names.size()) << "Invalid parameters file format"; + ICHECK(size == names.size()) << "Invalid parameters file format"; for (size_t i = 0; i < size; ++i) { int in_idx = GetInputIndex(names[i]); if (in_idx < 0) { @@ -210,7 +210,7 @@ void GraphRuntime::LoadParams(dmlc::Stream* strm) { continue; } uint32_t eid = this->entry_id(input_nodes_[in_idx], 0); - CHECK_LT(eid, data_entry_.size()); + ICHECK_LT(eid, data_entry_.size()); // The data_entry is allocated on device; NDArray::Load always loads the array into CPU.
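// Editorial note, not part of the patch: because of that CPU-side load, the idiom here
// (sketched from the surrounding loader) is a CPU temporary followed by a device copy:
//   NDArray temp;
//   temp.Load(strm);
//   data_entry_[eid].CopyFrom(temp);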
NDArray temp; @@ -221,23 +221,23 @@ void GraphRuntime::LoadParams(dmlc::Stream* strm) { void GraphRuntime::ShareParams(const GraphRuntime& other, dmlc::Stream* strm) { uint64_t header, reserved; - CHECK(strm->Read(&header)) << "Invalid parameters file format"; - CHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; - CHECK(strm->Read(&reserved)) << "Invalid parameters file format"; + ICHECK(strm->Read(&header)) << "Invalid parameters file format"; + ICHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format"; + ICHECK(strm->Read(&reserved)) << "Invalid parameters file format"; std::vector names; - CHECK(strm->Read(&names)) << "Invalid parameters file format"; + ICHECK(strm->Read(&names)) << "Invalid parameters file format"; uint64_t sz; strm->Read(&sz); size_t size = static_cast(sz); - CHECK(size == names.size()) << "Invalid parameters file format"; + ICHECK(size == names.size()) << "Invalid parameters file format"; for (size_t i = 0; i < size; ++i) { int in_idx = GetInputIndex(names[i]); if (in_idx < 0) continue; uint32_t eid = this->entry_id(input_nodes_[in_idx], 0); - CHECK_LT(eid, data_entry_.size()); - CHECK_EQ(data_entry_[eid].use_count(), 1); + ICHECK_LT(eid, data_entry_.size()); + ICHECK_EQ(data_entry_[eid].use_count(), 1); data_entry_[eid] = other.GetInput(GetInputIndex(names[i])); - CHECK_GT(data_entry_[eid].use_count(), 1); + ICHECK_GT(data_entry_[eid].use_count(), 1); const DLTensor* tmp = data_entry_[eid].operator->(); data_alignment_[eid] = details::GetDataAlignment(*tmp); } @@ -265,17 +265,17 @@ void GraphRuntime::SetupStorage() { for (int64_t sz : attrs_.shape[i]) { size *= static_cast(sz); } - CHECK_GE(storage_id, 0) << "Do not support runtime shape op"; + ICHECK_GE(storage_id, 0) << "Do not support runtime shape op"; DLDataType t = vtype[i]; size_t bits = t.bits * t.lanes; - CHECK(bits % 8U == 0U || bits == 1U); + ICHECK(bits % 8U == 0U || bits == 1U); size_t bytes = ((bits + 7U) / 8U) * size; uint32_t sid = static_cast(storage_id); if (sid >= pool_entry.size()) { pool_entry.resize(sid + 1, {0, -1}); } else { - CHECK(pool_entry[sid].device_type == -1 || pool_entry[sid].device_type == device_type) + ICHECK(pool_entry[sid].device_type == -1 || pool_entry[sid].device_type == device_type) << "The same pool entry cannot be assigned to multiple devices"; } pool_entry[sid].size = std::max(pool_entry[sid].size, bytes); @@ -302,7 +302,7 @@ void GraphRuntime::SetupStorage() { data_alignment_.resize(num_node_entries()); for (size_t i = 0; i < data_entry_.size(); ++i) { int storage_id = attrs_.storage_id[i]; - CHECK_LT(static_cast(storage_id), storage_pool_.size()); + ICHECK_LT(static_cast(storage_id), storage_pool_.size()); data_entry_[i] = storage_pool_[storage_id].CreateView(attrs_.shape[i], vtype[i]); const DLTensor* tmp = data_entry_[i].operator->(); data_alignment_[i] = details::GetDataAlignment(*tmp); @@ -331,7 +331,7 @@ void GraphRuntime::SetupOpExecs() { uint32_t eid = this->entry_id(nid, index); args.push_back(*(data_entry_[eid].operator->())); } - CHECK(inode.op_type == "tvm_op") << "Can only take tvm_op as op"; + ICHECK(inode.op_type == "tvm_op") << "Can only take tvm_op as op"; std::shared_ptr op_args = nullptr; std::tie(op_execs_[nid], op_args) = CreateTVMOp(inode.param, args, inode.inputs.size()); @@ -384,7 +384,7 @@ std::pair, std::shared_ptr > GraphRu // Get compiled function from the module that contains both host and device // code. 
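// Editorial note, not part of the patch: the `true` passed to module_.GetFunction below
// is query_imports (see the ModuleNode::GetFunction hunk in src/runtime/module.cc later
// in this diff); when set, the lookup falls through to imported modules, which is how a
// graph node resolves a kernel that lives in the imported device module. Usage shape:
//   tvm::runtime::PackedFunc pf = module_.GetFunction(param.func_name, /*query_imports=*/true);
//   ICHECK(pf != nullptr) << "no such function in module: " << param.func_name;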
tvm::runtime::PackedFunc pf = module_.GetFunction(param.func_name, true); - CHECK(pf != nullptr) << "no such function in module: " << param.func_name; + ICHECK(pf != nullptr) << "no such function in module: " << param.func_name; auto fexec = [arg_ptr, pf]() { TVMRetValue rv; @@ -451,7 +451,7 @@ PackedFunc GraphRuntime::GetFunction(const std::string& name, } else if (name == "share_params") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { const auto& module = args[0].operator Module(); - CHECK_EQ(module.operator->()->type_key(), "GraphRuntime"); + ICHECK_EQ(module.operator->()->type_key(), "GraphRuntime"); const auto& param_blob = args[1].operator std::string(); dmlc::MemoryStringStream strm(const_cast(¶m_blob)); this->ShareParams(dynamic_cast(*module.operator->()), &strm); @@ -488,9 +488,9 @@ std::vector GetAllContext(const TVMArgs& args) { // be passed in. The third one is the number of devices. // Eventually, we will only probably pass TVMContext for all the languages. TVM_REGISTER_GLOBAL("tvm.graph_runtime.create").set_body([](TVMArgs args, TVMRetValue* rv) { - CHECK_GE(args.num_args, 4) << "The expected number of arguments for graph_runtime.create is " - "at least 4, but it has " - << args.num_args; + ICHECK_GE(args.num_args, 4) << "The expected number of arguments for graph_runtime.create is " + "at least 4, but it has " + << args.num_args; const auto& contexts = GetAllContext(args); *rv = GraphRuntimeCreate(args[0], args[1], contexts); }); diff --git a/src/runtime/graph/graph_runtime.h b/src/runtime/graph/graph_runtime.h index 617ff3e25662..810ff43fe97a 100644 --- a/src/runtime/graph/graph_runtime.h +++ b/src/runtime/graph/graph_runtime.h @@ -41,10 +41,10 @@ namespace tvm { namespace runtime { /*! \brief macro to do C API call */ -#define TVM_CCALL(func) \ - { \ - int ret = (func); \ - CHECK_EQ(ret, 0) << TVMGetLastError(); \ +#define TVM_CCALL(func) \ + { \ + int ret = (func); \ + ICHECK_EQ(ret, 0) << TVMGetLastError(); \ } /*! 
\brief Magic number for NDArray list file */ @@ -192,13 +192,13 @@ class TVM_DLL GraphRuntime : public ModuleNode { // JSON Loader void Load(dmlc::JSONReader* reader) { reader->BeginArray(); - CHECK(reader->NextArrayItem()) << "invalid json format"; + ICHECK(reader->NextArrayItem()) << "invalid json format"; reader->Read(&node_id); - CHECK(reader->NextArrayItem()) << "invalid json format"; + ICHECK(reader->NextArrayItem()) << "invalid json format"; reader->Read(&index); if (reader->NextArrayItem()) { reader->Read(&version); - CHECK(!reader->NextArrayItem()) << "invalid json format"; + ICHECK(!reader->NextArrayItem()) << "invalid json format"; } else { version = 0; } @@ -238,7 +238,7 @@ class TVM_DLL GraphRuntime : public ModuleNode { bitmask |= 8; } } - CHECK_EQ(bitmask, 1 | 2 | 4 | 8) << "invalid format"; + ICHECK_EQ(bitmask, 1 | 2 | 4 | 8) << "invalid format"; } // JSON Loader void Load(dmlc::JSONReader* reader) { @@ -263,7 +263,7 @@ class TVM_DLL GraphRuntime : public ModuleNode { LOG(FATAL) << "do not support key " << key; } } - CHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format"; + ICHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format"; } }; struct GraphAttr { @@ -280,58 +280,58 @@ class TVM_DLL GraphRuntime : public ModuleNode { while (reader->NextObjectItem(&key)) { if (key == "dltype") { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&type); - CHECK_EQ(type, "list_str"); - CHECK(reader->NextArrayItem()); + ICHECK_EQ(type, "list_str"); + ICHECK(reader->NextArrayItem()); reader->Read(&dltype); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); bitmask |= 1; } else if (key == "storage_id") { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&type); - CHECK_EQ(type, "list_int"); - CHECK(reader->NextArrayItem()); + ICHECK_EQ(type, "list_int"); + ICHECK(reader->NextArrayItem()); reader->Read(&storage_id); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); bitmask |= 2; } else if (key == "shape") { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&type); - CHECK_EQ(type, "list_shape"); - CHECK(reader->NextArrayItem()); + ICHECK_EQ(type, "list_shape"); + ICHECK(reader->NextArrayItem()); reader->Read(&shape); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); bitmask |= 4; } else if (key == "device_index") { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&type); - CHECK_EQ(type, "list_int"); - CHECK(reader->NextArrayItem()); + ICHECK_EQ(type, "list_int"); + ICHECK(reader->NextArrayItem()); reader->Read(&device_index); - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); } else { reader->BeginArray(); - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); reader->Read(&type); if (type == "list_int") { - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); std::vector temp; reader->Read(&temp); } else if (type == "size_t") { - CHECK(reader->NextArrayItem()); + ICHECK(reader->NextArrayItem()); size_t temp; reader->Read(&temp); } else { LOG(FATAL) << "cannot skip graph attr " << key; } - CHECK(!reader->NextArrayItem()); + ICHECK(!reader->NextArrayItem()); } } - CHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format"; + ICHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format"; } }; // The graph attribute fields. 
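Editorial note, not part of the patch: every GraphAttr field parsed above is stored in the graph JSON as a two-element array, e.g. "storage_id": ["list_int", [...]], which is why each branch repeats the same tag-then-payload sequence. A minimal sketch of that pattern, assuming a dmlc::JSONReader* reader positioned at such an array:

  std::string tag;
  std::vector<int> payload;
  reader->BeginArray();
  ICHECK(reader->NextArrayItem());   // first element: the type tag
  reader->Read(&tag);
  ICHECK_EQ(tag, "list_int");
  ICHECK(reader->NextArrayItem());   // second element: the payload list
  reader->Read(&payload);
  ICHECK(!reader->NextArrayItem());  // exactly two elements expected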
@@ -361,7 +361,7 @@ class TVM_DLL GraphRuntime : public ModuleNode { LOG(FATAL) << "key " << key << " is not supported"; } } - CHECK_EQ(bitmask, 1 | 2 | 4 | 8 | 16) << "invalid format"; + ICHECK_EQ(bitmask, 1 | 2 | 4 | 8 | 16) << "invalid format"; } /*! \brief Setup the temporal storage */ void SetupStorage(); diff --git a/src/runtime/graph/graph_runtime_factory.cc b/src/runtime/graph/graph_runtime_factory.cc index aa35afaf70f8..632a25c987bc 100644 --- a/src/runtime/graph/graph_runtime_factory.cc +++ b/src/runtime/graph/graph_runtime_factory.cc @@ -55,9 +55,9 @@ PackedFunc GraphRuntimeFactory::GetFunction( }); } else if (name == "debug_create") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_GE(args.size(), 2); + ICHECK_GE(args.size(), 2); std::string module_name = args[0].operator String(); - CHECK(module_name == module_name_) << "Currently we only support single model for now."; + ICHECK(module_name == module_name_) << "Currently we only support single model for now."; std::vector contexts; for (int i = 1; i < args.num_args; ++i) { contexts.emplace_back(args[i].operator TVMContext()); @@ -86,7 +86,7 @@ void GraphRuntimeFactory::SaveToBinary(dmlc::Stream* stream) { arrays.emplace_back(const_cast(v.second.operator->())); } uint64_t sz = arrays.size(); - CHECK(sz == names.size()); + ICHECK(sz == names.size()); stream->Write(sz); stream->Write(names); for (size_t i = 0; i < sz; ++i) { @@ -105,8 +105,8 @@ Module GraphRuntimeFactory::RuntimeCreate(const std::vector& ctxs) { Module GraphRuntimeFactory::DebugRuntimeCreate(const std::vector& ctxs) { const PackedFunc* pf = tvm::runtime::Registry::Get("tvm.graph_runtime_debug.create"); - CHECK(pf != nullptr) << "Cannot find function tvm.graph_runtime_debug.create in registry. " - "Do you enable debug graph runtime build?"; + ICHECK(pf != nullptr) << "Cannot find function tvm.graph_runtime_debug.create in registry. " + "Do you enable debug graph runtime build?"; // Debug runtime create packed function will call GetAllContexs, so we unpack the ctxs. std::vector unpacked_ctxs; for (const auto& ctx : ctxs) { @@ -135,29 +135,29 @@ Module GraphRuntimeFactoryModuleLoadBinary(void* strm) { std::string graph_json; std::unordered_map params; std::string module_name; - CHECK(stream->Read(&graph_json)); + ICHECK(stream->Read(&graph_json)); uint64_t sz; - CHECK(stream->Read(&sz)); + ICHECK(stream->Read(&sz)); std::vector names; - CHECK(stream->Read(&names)); - CHECK(sz == names.size()); + ICHECK(stream->Read(&names)); + ICHECK(sz == names.size()); for (size_t i = 0; i < sz; ++i) { tvm::runtime::NDArray temp; temp.Load(stream); params[names[i]] = temp; } - CHECK(stream->Read(&module_name)); + ICHECK(stream->Read(&module_name)); auto exec = make_object(graph_json, params, module_name); return Module(exec); } TVM_REGISTER_GLOBAL("tvm.graph_runtime_factory.create").set_body([](TVMArgs args, TVMRetValue* rv) { - CHECK_GE(args.num_args, 3) << "The expected number of arguments for " - "graph_runtime_factory.create needs at least 3, " - "but it has " - << args.num_args; + ICHECK_GE(args.num_args, 3) << "The expected number of arguments for " + "graph_runtime_factory.create needs at least 3, " + "but it has " + << args.num_args; // The argument order is graph_json, module, module_name, params. 
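// Editorial note, not part of the patch: the params mentioned above arrive flattened as
// (name, NDArray) pairs appended after the first three arguments, which is why the next
// check requires args.size() - 3 to be even and the loop below steps by two.
// Hypothetical call shape with two parameters:
//   fcreate(graph_json, mod, "default", "p0", nd0, "p1", nd1);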
- CHECK_EQ((args.size() - 3) % 2, 0); + ICHECK_EQ((args.size() - 3) % 2, 0); std::unordered_map params; for (size_t i = 3; i < static_cast(args.size()); i += 2) { std::string name = args[i].operator String(); diff --git a/src/runtime/hexagon/hexagon_device_api.cc b/src/runtime/hexagon/hexagon_device_api.cc index a89015707f99..605c55eb89b9 100644 --- a/src/runtime/hexagon/hexagon_device_api.cc +++ b/src/runtime/hexagon/hexagon_device_api.cc @@ -17,9 +17,9 @@ * under the License. */ -#include #include #include +#include #include #include @@ -60,12 +60,12 @@ inline void HexagonDeviceAPI::GetAttr(TVMContext ctx, DeviceAttrKind kind, TVMRe inline void* HexagonDeviceAPI::AllocDataSpace(TVMContext ctx, size_t nbytes, size_t alignment, DLDataType type_hint) { - CHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); return hexagon::Device::Global()->Alloc(nbytes, alignment); } inline void HexagonDeviceAPI::FreeDataSpace(TVMContext ctx, void* ptr) { - CHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); hexagon::Device::Global()->Free(ptr); } @@ -85,22 +85,22 @@ inline void HexagonDeviceAPI::CopyDataFromTo(const void* from, size_t from_offse if (ctx_from.device_type == kDLCPU) { memmove(dst, src, num_bytes); } else if (static_cast(ctx_from.device_type) == kDLHexagon) { - CHECK(hexagon::Device::ValidateDeviceId(ctx_from.device_id)); - CHECK_EQ(ctx_from.device_id, ctx_to.device_id); - CHECK(Is32bit(dst) && Is32bit(src)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx_from.device_id)); + ICHECK_EQ(ctx_from.device_id, ctx_to.device_id); + ICHECK(Is32bit(dst) && Is32bit(src)); hexagon::Device::Global()->CopyDeviceToDevice(dst, src, num_bytes); } } else { if (ctx_from.device_type == kDLCPU) { - CHECK_EQ(static_cast(ctx_to.device_type), kDLHexagon); - CHECK(Is32bit(dst)); - CHECK(hexagon::Device::ValidateDeviceId(ctx_to.device_id)); + ICHECK_EQ(static_cast(ctx_to.device_type), kDLHexagon); + ICHECK(Is32bit(dst)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx_to.device_id)); hexagon::Device::Global()->CopyHostToDevice(dst, src, num_bytes); } else { - CHECK_EQ(static_cast(ctx_from.device_type), kDLHexagon); - CHECK_EQ(ctx_to.device_type, kDLCPU); - CHECK(Is32bit(src)); - CHECK(hexagon::Device::ValidateDeviceId(ctx_from.device_id)); + ICHECK_EQ(static_cast(ctx_from.device_type), kDLHexagon); + ICHECK_EQ(ctx_to.device_type, kDLCPU); + ICHECK(Is32bit(src)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx_from.device_id)); hexagon::Device::Global()->CopyDeviceToHost(dst, src, num_bytes); } } @@ -109,7 +109,7 @@ inline void HexagonDeviceAPI::CopyDataFromTo(const void* from, size_t from_offse inline void HexagonDeviceAPI::StreamSync(TVMContext ctx, TVMStreamHandle stream) {} inline void* HexagonDeviceAPI::AllocWorkspace(TVMContext ctx, size_t nbytes, DLDataType type_hint) { - CHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); if (type_hint.code == 100) { size_t align = std::min(nbytes, 2048lu); return hexagon::Device::Global()->AllocVtcm(nbytes, align); @@ -118,7 +118,7 @@ inline void* HexagonDeviceAPI::AllocWorkspace(TVMContext ctx, size_t nbytes, DLD } inline void HexagonDeviceAPI::FreeWorkspace(TVMContext ctx, void* ptr) { - CHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); + ICHECK(hexagon::Device::ValidateDeviceId(ctx.device_id)); DeviceAPI::FreeWorkspace(ctx, ptr); } diff --git 
a/src/runtime/hexagon/hexagon_module.cc b/src/runtime/hexagon/hexagon_module.cc index 305fd50cbdd5..994e24b99084 100644 --- a/src/runtime/hexagon/hexagon_module.cc +++ b/src/runtime/hexagon/hexagon_module.cc @@ -22,8 +22,8 @@ #ifdef __ANDROID__ #include #endif -#include #include +#include #include #include @@ -176,8 +176,8 @@ void ArgLayout::Push(uint32_t* v, unsigned t_size, unsigned t_align) { if (!InReg) { // Allocate on stack. - CHECK_EQ((t_align & (t_align - 1)), 0) << "Alignment should be a power of 2"; - CHECK_GE(t_align, 4) << "Alignment should be at least 4"; + ICHECK_EQ((t_align & (t_align - 1)), 0) << "Alignment should be a power of 2"; + ICHECK_GE(t_align, 4) << "Alignment should be at least 4"; // Round t_size up to a multiple of 4. unsigned s_size = Stack.size(); unsigned s_align = t_align / 4; // Alignment of T in words on the stack. @@ -223,18 +223,18 @@ class HexagonModuleNode final : public runtime::ModuleNode { std::string meta_file = GetMetaFilePath(file_name); SaveMetaDataToFile(meta_file, fmap_); std::string c = "cp " + data_ + " " + file_name; - CHECK(std::system(c.c_str()) == 0) << "Cannot create " + file_name; + ICHECK(std::system(c.c_str()) == 0) << "Cannot create " + file_name; } else if (fmt == "s" || fmt == "asm") { - CHECK(!asm_.empty()) << "Assembler source not available"; + ICHECK(!asm_.empty()) << "Assembler source not available"; SaveBinaryToFile(file_name, asm_); } else if (fmt == "o" || fmt == "obj") { - CHECK(!obj_.empty()) << "Object data not available"; + ICHECK(!obj_.empty()) << "Object data not available"; SaveBinaryToFile(file_name, obj_); } else if (fmt == "ll") { - CHECK(!ir_.empty()) << "LLVM IR source not available"; + ICHECK(!ir_.empty()) << "LLVM IR source not available"; SaveBinaryToFile(file_name, ir_); } else if (fmt == "bc") { - CHECK(!bc_.empty()) << "LLVM IR bitcode not available"; + ICHECK(!bc_.empty()) << "LLVM IR bitcode not available"; SaveBinaryToFile(file_name, bc_); } else { LOG(FATAL) << "HexagonModuleNode::SaveToFile: unhandled format `" << fmt << "'"; @@ -480,7 +480,7 @@ hexagon::ArgLayout HexagonModuleNode::BuildArgLayout(const TVMArgs& As) const { // types, so there is no way to tell if the value being passed needs // one or two registers. Assume that all integers are 32-bit, and // simply abort if the actual value does not fit. - CHECK_EQ(static_cast(A), static_cast(A)); + ICHECK_EQ(static_cast(A), static_cast(A)); Args.Push(static_cast(A)); break; // 64-bit values diff --git a/src/runtime/hexagon/hexagon_module.h b/src/runtime/hexagon/hexagon_module.h index b922b169bd61..e558997b7a4c 100644 --- a/src/runtime/hexagon/hexagon_module.h +++ b/src/runtime/hexagon/hexagon_module.h @@ -20,8 +20,8 @@ #ifndef TVM_RUNTIME_HEXAGON_HEXAGON_MODULE_H_ #define TVM_RUNTIME_HEXAGON_HEXAGON_MODULE_H_ -#include #include +#include #include #include diff --git a/src/runtime/hexagon/sim/hexagon_device_sim.cc b/src/runtime/hexagon/sim/hexagon_device_sim.cc index 9ff5a0421d51..6cc7dcf3209f 100644 --- a/src/runtime/hexagon/sim/hexagon_device_sim.cc +++ b/src/runtime/hexagon/sim/hexagon_device_sim.cc @@ -17,12 +17,12 @@ * under the License. 
*/ -#include #include #include #include #include #include +#include #include #include @@ -107,7 +107,7 @@ struct non_const_str { } size_t size() const { return pointers_.size(); } operator char*() { - CHECK_EQ(pointers_.size(), 1); + ICHECK_EQ(pointers_.size(), 1); return pointers_[0]; } operator char* *() { return pointers_.data(); } @@ -394,17 +394,17 @@ decltype(HexagonSimulator::opt_map_) HexagonSimulator::opt_map_ = { {"--verbose", &HexagonSimulator::HandleVerbose}, }; -#define CHECKED_CALL(func, ...) \ - do { \ - HEXAPI_Status s = sim_->func(__VA_ARGS__); \ - CHECK_EQ(s, HEX_STAT_SUCCESS) << "HexagonSimulator: " #func " failed with code " \ - << HexagonSimulator::to_string(s); \ +#define CHECKED_CALL(func, ...) \ + do { \ + HEXAPI_Status s = sim_->func(__VA_ARGS__); \ + ICHECK_EQ(s, HEX_STAT_SUCCESS) \ + << "HexagonSimulator: " #func " failed with code " << HexagonSimulator::to_string(s); \ } while (false) inline HEX_VA_t HexagonSimulator::p2va(const void* p) { uintptr_t u = reinterpret_cast(p); HEX_VA_t va = static_cast(u); - CHECK_EQ(static_cast(va), u); + ICHECK_EQ(static_cast(va), u); return va; } @@ -425,13 +425,13 @@ template void HexagonSimulator::CopyNToV(HEX_VA_t dst, const void* host_src) { using src_uint_t = typename unalign::type>::type; auto* ps = reinterpret_cast(host_src); - CHECK_EQ(sim_->WriteVirtual(dst, -1u, N, ps->value), HEX_STAT_SUCCESS); + ICHECK_EQ(sim_->WriteVirtual(dst, -1u, N, ps->value), HEX_STAT_SUCCESS); } template void HexagonSimulator::CopyNFromV(void* host_dst, HEX_VA_t src) { typename uint::type v; - CHECK_EQ(sim_->ReadVirtual(src, -1u, N, &v), HEX_STAT_SUCCESS); + ICHECK_EQ(sim_->ReadVirtual(src, -1u, N, &v), HEX_STAT_SUCCESS); using dst_uint_t = typename unalign::type>::type; auto* pd = reinterpret_cast(host_dst); @@ -465,7 +465,7 @@ void HexagonSimulator::CopyToV(HEX_VA_t dst, const void* host_src, unsigned len) src++; len--; } - CHECK_EQ(len, 0); + ICHECK_EQ(len, 0); } void HexagonSimulator::CopyFromV(void* host_dst, HEX_VA_t src, unsigned len) { @@ -495,7 +495,7 @@ void HexagonSimulator::CopyFromV(void* host_dst, HEX_VA_t src, unsigned len) { src++; len--; } - CHECK_EQ(len, 0); + ICHECK_EQ(len, 0); } void HexagonSimulator::SendMsg(Message& m, const void* data, bool show_dbg) { @@ -504,13 +504,13 @@ void HexagonSimulator::SendMsg(Message& m, const void* data, bool show_dbg) { HEX_4u_t result; HEX_8u_t cycles0, cycles1; if (report_cycles) { - CHECK_EQ(sim_->GetSimulatedCycleCount(&cycles0), HEX_STAT_SUCCESS); + ICHECK_EQ(sim_->GetSimulatedCycleCount(&cycles0), HEX_STAT_SUCCESS); } core = sim_->Run(&result); - CHECK_EQ(core, HEX_CORE_BREAKPOINT); + ICHECK_EQ(core, HEX_CORE_BREAKPOINT); if (report_cycles) { - CHECK_EQ(sim_->GetSimulatedCycleCount(&cycles1), HEX_STAT_SUCCESS); + ICHECK_EQ(sim_->GetSimulatedCycleCount(&cycles1), HEX_STAT_SUCCESS); LOG(INFO) << "host: execution took " << (cycles1 - cycles0) << " cycles"; } }; @@ -522,8 +522,8 @@ void HexagonSimulator::SendMsg(Message& m, const void* data, bool show_dbg) { // Receive the acknowledgement with the address for the payload. CopyFromV(&r, message_buffer_v_, sizeof(r)); - CHECK_EQ(r.code, kMsgAck); - CHECK_GE(r.len, m.len); + ICHECK_EQ(r.code, kMsgAck); + ICHECK_GE(r.len, m.len); // Send the actual message. m.va = r.va; @@ -533,7 +533,7 @@ void HexagonSimulator::SendMsg(Message& m, const void* data, bool show_dbg) { // Receive the return data. 
CopyFromV(&m, message_buffer_v_, sizeof(m)); - CHECK_EQ(m.code, kNone); + ICHECK_EQ(m.code, kNone); } HexagonSimulator::HexagonSimulator(bool enable_queuing) { @@ -610,12 +610,12 @@ void* HexagonSimulator::Alloc(unsigned size, unsigned align) { MsgAlloc ma = {size, align}; SendMsg(m, &ma, true); - CHECK_EQ(sizeof(MsgPointer), m.len); + ICHECK_EQ(sizeof(MsgPointer), m.len); MsgPointer mp; CopyFromV(&mp, m.va, m.len); LOG(INFO) << "HexagonSimulator::Alloc -> " << std::hex << mp.va << std::dec; - CHECK_NE(mp.va, 0); + ICHECK_NE(mp.va, 0); return va2p(mp.va); } @@ -636,12 +636,12 @@ void* HexagonSimulator::AllocVtcm(unsigned size, unsigned align) { MsgAlloc ma = {size, align}; SendMsg(m, &ma, true); - CHECK_EQ(sizeof(MsgPointer), m.len); + ICHECK_EQ(sizeof(MsgPointer), m.len); MsgPointer mp; CopyFromV(&mp, m.va, m.len); LOG(INFO) << "HexagonSimulator::AllocVtcm -> " << std::hex << mp.va << std::dec; - CHECK_NE(mp.va, 0); + ICHECK_NE(mp.va, 0); return va2p(mp.va); } @@ -650,7 +650,7 @@ void HexagonSimulator::FreeVtcm(void* ptr) {} void HexagonSimulator::CopyDeviceToDevice(void* dst, const void* src, unsigned len) { LOG(INFO) << "HexagonSimulator::CopyDeviceToDevice(dst=" << std::hex << dst << ", src=" << src << ", len=" << std::dec << len << ')'; - CHECK(dst != nullptr && src != nullptr); + ICHECK(dst != nullptr && src != nullptr); Message m = {kCopy, sizeof(MsgCopy), 0u}; MsgCopy mc = {p2va(dst), p2va(src), len}; SendMsg(m, &mc, true); @@ -677,7 +677,7 @@ void* HexagonSimulator::Load(const std::string& data, const std::string& fmt) { Message m = {kLoad, static_cast(data.size() + 1), 0u}; SendMsg(m, data.c_str(), false); - CHECK_EQ(sizeof(MsgPointer), m.len); + ICHECK_EQ(sizeof(MsgPointer), m.len); MsgPointer mp; CopyFromV(&mp, m.va, sizeof(mp)); @@ -685,7 +685,7 @@ void* HexagonSimulator::Load(const std::string& data, const std::string& fmt) { } void HexagonSimulator::Unload(void* mod) { - CHECK(mod); + ICHECK(mod); Message m = {kUnload, sizeof(MsgPointer), 0u}; MsgPointer mp = {p2va(mod)}; SendMsg(m, &mp, false); @@ -696,7 +696,7 @@ void* HexagonSimulator::Resolve(const std::string& sym) { Message m = {kResolve, static_cast(sym.size() + 1), 0u}; SendMsg(m, sym.c_str(), true); - CHECK_EQ(sizeof(MsgPointer), m.len); + ICHECK_EQ(sizeof(MsgPointer), m.len); MsgPointer mp; CopyFromV(&mp, m.va, sizeof(mp)); @@ -717,7 +717,7 @@ void HexagonSimulator::Call(void* func, uint32_t* scalar, unsigned sc_num, uint3 // Copy the MsgCall contents into the data vector as a sequence of uints. MsgCall me = {p2va(func), sc_num, st_num}; - CHECK((is_multiple_of())); + ICHECK((is_multiple_of())); for (unsigned i = 0, e = sizeof(me) / sizeof(uint32_t); i != e; ++i) data.push_back(reinterpret_cast(&me)[i]); @@ -763,14 +763,14 @@ bool HexagonSimulator::Configure(string_list& opts) { LOG(FATAL) << "Unrecognized simulator option: " << key; // unreachable } - CHECK((this->*f->second)(opts)) << "error handling option: " << key; + ICHECK((this->*f->second)(opts)) << "error handling option: " << key; } // Check AHB. 
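// Editorial note, not part of the patch: the AHB and AXI2 address ranges are each held
// as a pair of optionals, and the checks below enforce a both-or-neither invariant
// before configuring the simulator. Equivalent sketch for one pair (member names taken
// from this hunk):
//   bool both = ahb_.first.hasValue() && ahb_.second.hasValue();
//   bool neither = !ahb_.first.hasValue() && !ahb_.second.hasValue();
//   ICHECK(both || neither) << "specify both low and high addresses for AHB";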
if (ahb_.first.hasValue() && ahb_.second.hasValue()) { CHECKED_CALL(ConfigureAHB, *ahb_.first, *ahb_.second); } else { - CHECK(!ahb_.first.hasValue() && !ahb_.second.hasValue()) + ICHECK(!ahb_.first.hasValue() && !ahb_.second.hasValue()) << "HexagonSimulator: please specify both low and high addresses " "for AHB"; } @@ -779,7 +779,7 @@ bool HexagonSimulator::Configure(string_list& opts) { if (axi2_.first.hasValue() && axi2_.second.hasValue()) { CHECKED_CALL(ConfigureAXI2, *axi2_.first, *axi2_.second); } else { - CHECK(!axi2_.first.hasValue() && !axi2_.second.hasValue()) + ICHECK(!axi2_.first.hasValue() && !axi2_.second.hasValue()) << "HexagonSimulator: please specify both low and high addresses " "for AXI2"; } @@ -806,7 +806,7 @@ bool HexagonSimulator::HandleAHBBusRatio(string_list& rest) { bool HexagonSimulator::HandleAHBHighAddr(string_list& rest) { auto addr = detail::to_uint(detail::pop_front(rest)); - CHECK(addr) << "HexagonSimulator: invalid value for AHB high adddress"; + ICHECK(addr) << "HexagonSimulator: invalid value for AHB high adddress"; if (addr) { ahb_.second = *addr; } @@ -815,7 +815,7 @@ bool HexagonSimulator::HandleAHBHighAddr(string_list& rest) { bool HexagonSimulator::HandleAHBLowAddr(string_list& rest) { auto addr = detail::to_uint(detail::pop_front(rest)); - CHECK(addr) << "HexagonSimulator: invalid value for AHB low adddress"; + ICHECK(addr) << "HexagonSimulator: invalid value for AHB low adddress"; if (addr) { ahb_.first = *addr; } @@ -841,7 +841,7 @@ bool HexagonSimulator::HandleAXI2BusRatio(string_list& rest) { bool HexagonSimulator::HandleAXI2HighAddr(string_list& rest) { auto addr = detail::to_uint(detail::pop_front(rest)); - CHECK(addr) << "HexagonSimulator: invalid value for AXI2 high adddress"; + ICHECK(addr) << "HexagonSimulator: invalid value for AXI2 high adddress"; if (addr) { axi2_.second = *addr; } @@ -850,7 +850,7 @@ bool HexagonSimulator::HandleAXI2HighAddr(string_list& rest) { bool HexagonSimulator::HandleAXI2LowAddr(string_list& rest) { auto addr = detail::to_uint(detail::pop_front(rest)); - CHECK(addr) << "HexagonSimulator: invalid value for AXI2 low adddress"; + ICHECK(addr) << "HexagonSimulator: invalid value for AXI2 low adddress"; if (addr) { axi2_.first = *addr; } diff --git a/src/runtime/hexagon/target/hexagon_dsprpcapi.cc b/src/runtime/hexagon/target/hexagon_dsprpcapi.cc index bf10feb652cd..d494db82e2c7 100644 --- a/src/runtime/hexagon/target/hexagon_dsprpcapi.cc +++ b/src/runtime/hexagon/target/hexagon_dsprpcapi.cc @@ -21,8 +21,8 @@ #include "hexagon_dsprpcapi.h" #include -#include #include +#include #include "hexagon_target_log.h" @@ -32,7 +32,7 @@ namespace runtime { namespace hexagon { DspRpcAPI::DspRpcAPI() { - CHECK(lib_handle_ = dlopen(rpc_lib_name_, RTLD_LAZY | RTLD_LOCAL)); + ICHECK(lib_handle_ = dlopen(rpc_lib_name_, RTLD_LAZY | RTLD_LOCAL)); #define RESOLVE(n) n##_ = GetSymbol(#n) RESOLVE(remote_handle_close); diff --git a/src/runtime/hexagon/target/hexagon_dsprpcapi.h b/src/runtime/hexagon/target/hexagon_dsprpcapi.h index ca812e6c2f1f..c0e40805ecbf 100644 --- a/src/runtime/hexagon/target/hexagon_dsprpcapi.h +++ b/src/runtime/hexagon/target/hexagon_dsprpcapi.h @@ -21,8 +21,8 @@ #define TVM_RUNTIME_HEXAGON_TARGET_HEXAGON_DSPRPCAPI_H_ #ifdef __ANDROID__ -#include #include +#include #include "remote.h" #include "remote64.h" @@ -109,7 +109,7 @@ class DspRpcAPI { #define DECLFUNC(fn) \ fn##_t* fn##_ptr(bool allow_nullptr = false) const { \ - if (!allow_nullptr) CHECK(fn##_ != nullptr); \ + if (!allow_nullptr) ICHECK(fn##_ != nullptr); 
\ return fn##_; \ } DECLFUNC(remote_handle_close) diff --git a/src/runtime/hexagon/target/hexagon_stubapi.cc b/src/runtime/hexagon/target/hexagon_stubapi.cc index 2ed33471b98f..5428ae7c1cff 100644 --- a/src/runtime/hexagon/target/hexagon_stubapi.cc +++ b/src/runtime/hexagon/target/hexagon_stubapi.cc @@ -21,9 +21,9 @@ #include "hexagon_stubapi.h" #include -#include #include #include +#include #include "hexagon_target_log.h" @@ -45,7 +45,7 @@ StubAPI::StubAPI() { constexpr auto nondomain_lib_name = "libtvm_remote_nd_stub.so"; const char* lib_name = enable_domains_ ? domain_lib_name : nondomain_lib_name; - CHECK(lib_handle_ = dlopen(lib_name, RTLD_LAZY | RTLD_LOCAL)); + ICHECK(lib_handle_ = dlopen(lib_name, RTLD_LAZY | RTLD_LOCAL)); #define RESOLVE(fn) p##fn##_ = GetSymbol(#fn) if (enable_domains_) { diff --git a/src/runtime/hexagon/target/hexagon_stubapi.h b/src/runtime/hexagon/target/hexagon_stubapi.h index 5213b6d0d7af..cc5b7b7413ca 100644 --- a/src/runtime/hexagon/target/hexagon_stubapi.h +++ b/src/runtime/hexagon/target/hexagon_stubapi.h @@ -22,9 +22,9 @@ #ifdef __ANDROID__ #include -#include #include #include +#include #include diff --git a/src/runtime/library_module.cc b/src/runtime/library_module.cc index a5935491fcd7..30ef2141c508 100644 --- a/src/runtime/library_module.cc +++ b/src/runtime/library_module.cc @@ -46,7 +46,7 @@ class LibraryModuleNode final : public ModuleNode { if (name == runtime::symbol::tvm_module_main) { const char* entry_name = reinterpret_cast(lib_->GetSymbol(runtime::symbol::tvm_module_main)); - CHECK(entry_name != nullptr) + ICHECK(entry_name != nullptr) << "Symbol " << runtime::symbol::tvm_module_main << " is not presented"; faddr = reinterpret_cast(lib_->GetSymbol(entry_name)); } else { @@ -75,7 +75,7 @@ PackedFunc WrapPackedFunc(TVMBackendPackedCFunc faddr, const ObjectPtr& int ret_type_code = kTVMNullptr; int ret = (*faddr)(const_cast(args.values), const_cast(args.type_codes), args.num_args, &ret_value, &ret_type_code, nullptr); - CHECK_EQ(ret, 0) << TVMGetLastError(); + ICHECK_EQ(ret, 0) << TVMGetLastError(); if (ret_type_code != kTVMNullptr) { *rv = TVMRetValue::MoveFromCHost(ret_value, ret_type_code); } @@ -107,7 +107,7 @@ void InitContextFunctions(std::function fgetsymbol) { * \return Root Module. 
*/ runtime::Module ProcessModuleBlob(const char* mblob, ObjectPtr lib) { - CHECK(mblob != nullptr); + ICHECK(mblob != nullptr); uint64_t nbytes = 0; for (size_t i = 0; i < sizeof(nbytes); ++i) { uint64_t c = mblob[i]; @@ -117,21 +117,21 @@ runtime::Module ProcessModuleBlob(const char* mblob, ObjectPtr lib) { static_cast(nbytes)); dmlc::Stream* stream = &fs; uint64_t size; - CHECK(stream->Read(&size)); + ICHECK(stream->Read(&size)); std::vector modules; std::vector import_tree_row_ptr; std::vector import_tree_child_indices; for (uint64_t i = 0; i < size; ++i) { std::string tkey; - CHECK(stream->Read(&tkey)); + ICHECK(stream->Read(&tkey)); // Currently, _lib is for DSOModule, but we // don't have a loadbinary function for it currently if (tkey == "_lib") { auto dso_module = Module(make_object(lib)); modules.emplace_back(dso_module); } else if (tkey == "_import_tree") { - CHECK(stream->Read(&import_tree_row_ptr)); - CHECK(stream->Read(&import_tree_child_indices)); + ICHECK(stream->Read(&import_tree_row_ptr)); + ICHECK(stream->Read(&import_tree_child_indices)); } else { std::string loadkey = "runtime.module.loadbinary_"; std::string fkey = loadkey + tkey; @@ -146,7 +146,7 @@ runtime::Module ProcessModuleBlob(const char* mblob, ObjectPtr lib) { loaders += name.substr(loadkey.size()); } } - CHECK(f != nullptr) + ICHECK(f != nullptr) << "Binary was created using " << tkey << " but a loader of that name is not registered. Available loaders are " << loaders << ". Perhaps you need to recompile with this runtime enabled."; @@ -169,12 +169,12 @@ runtime::Module ProcessModuleBlob(const char* mblob, ObjectPtr lib) { for (size_t j = import_tree_row_ptr[i]; j < import_tree_row_ptr[i + 1]; ++j) { auto module_import_addr = ModuleInternal::GetImportsAddr(modules[i].operator->()); auto child_index = import_tree_child_indices[j]; - CHECK(child_index < modules.size()); + ICHECK(child_index < modules.size()); module_import_addr->emplace_back(modules[child_index]); } } } - CHECK(!modules.empty()); + ICHECK(!modules.empty()); // invariant: root module is always at location 0. // The module order is collected via DFS return modules[0]; diff --git a/src/runtime/metadata_module.cc b/src/runtime/metadata_module.cc index 56f894c46906..acef9d4736fd 100644 --- a/src/runtime/metadata_module.cc +++ b/src/runtime/metadata_module.cc @@ -69,7 +69,7 @@ class MetadataModuleNode : public ModuleNode { // Run the module. // Normally we would only have a limited number of submodules. The runtime // symbol lookup overhead should be minimal. - CHECK(!this->imports().empty()); + ICHECK(!this->imports().empty()); for (Module it : this->imports()) { PackedFunc pf = it.GetFunction(name); if (pf != nullptr) return pf; @@ -86,10 +86,10 @@ class MetadataModuleNode : public ModuleNode { */ Array GetRequiredMetadata(const std::string& symbol) { Array ret; - CHECK_GT(sym_vars_.count(symbol), 0U) << "No symbol is recorded for " << symbol; + ICHECK_GT(sym_vars_.count(symbol), 0U) << "No symbol is recorded for " << symbol; std::vector vars = sym_vars_[symbol]; for (const auto& it : vars) { - CHECK_GT(metadata_.count(it), 0U) << "Found not recorded constant variable: " << it; + ICHECK_GT(metadata_.count(it), 0U) << "Found not recorded constant variable: " << it; ret.push_back(metadata_[it]); } return ret; @@ -119,7 +119,7 @@ class MetadataModuleNode : public ModuleNode { // Initialize the module with metadata. int ret = init(md); // Report the error if initialization failed.
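// Editorial note, not part of the patch: TVM's C entry points return 0 on success and
// nonzero on failure, with the message retrievable via TVMGetLastError(); the check
// below (like the TVM_CCALL macro earlier in this diff) leans on that convention.
// Hedged sketch, SomeCEntry being a hypothetical entry point:
//   int rc = SomeCEntry(/*args...*/);
//   ICHECK_EQ(rc, 0) << TVMGetLastError();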
- CHECK_EQ(ret, 0) << TVMGetLastError(); + ICHECK_EQ(ret, 0) << TVMGetLastError(); break; } } @@ -164,10 +164,10 @@ class MetadataModuleNode : public ModuleNode { // Load the variables. std::vector variables; - CHECK(stream->Read(&variables)) << "Loading variables failed"; + ICHECK(stream->Read(&variables)) << "Loading variables failed"; uint64_t sz; - CHECK(stream->Read(&sz, sizeof(sz))) << "Loading metadata size failed"; - CHECK_EQ(static_cast(sz), variables.size()) + ICHECK(stream->Read(&sz, sizeof(sz))) << "Loading metadata size failed"; + ICHECK_EQ(static_cast(sz), variables.size()) << "The number of variables and ndarray counts must match"; // Load the list of ndarray. std::vector arrays; @@ -179,19 +179,19 @@ class MetadataModuleNode : public ModuleNode { std::unordered_map metadata; for (uint64_t i = 0; i < sz; i++) { - CHECK_EQ(metadata.count(variables[i]), 0U); + ICHECK_EQ(metadata.count(variables[i]), 0U); metadata[variables[i]] = arrays[i]; } // Load the symbol to list of required constant variables mapping std::vector symbols; - CHECK(stream->Read(&symbols)) << "Loading symbols failed"; - CHECK(stream->Read(&sz, sizeof(sz))) << "Loading number of symbols failed"; - CHECK_EQ(static_cast(sz), symbols.size()); + ICHECK(stream->Read(&symbols)) << "Loading symbols failed"; + ICHECK(stream->Read(&sz, sizeof(sz))) << "Loading number of symbols failed"; + ICHECK_EQ(static_cast(sz), symbols.size()); std::vector> const_vars; for (uint64_t i = 0; i < sz; i++) { std::vector vars; - CHECK(stream->Read(&vars)) << "Loading const variables failed"; + ICHECK(stream->Read(&vars)) << "Loading const variables failed"; const_vars.push_back(vars); } diff --git a/src/runtime/metal/metal_common.h b/src/runtime/metal/metal_common.h index 634ee305153b..d13ac7e78982 100644 --- a/src/runtime/metal/metal_common.h +++ b/src/runtime/metal/metal_common.h @@ -30,10 +30,10 @@ #import #import #import -#include #include #include #include +#include #include #include @@ -64,15 +64,15 @@ class MetalWorkspace final : public DeviceAPI { ~MetalWorkspace(); // Get command queue for given context. 
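// Editorial note, not part of the patch: GetCommandQueue and GetDevice below share one
// guard shape, pin the device type, then bounds-check device_id against the per-device
// table before indexing. Equivalent sketch for the queue accessor:
//   ICHECK_EQ(ctx.device_type, kDLMetal);
//   ICHECK(0 <= ctx.device_id && static_cast<size_t>(ctx.device_id) < queues.size())
//       << "Invalid Metal device_id=" << ctx.device_id;
//   return queues[ctx.device_id];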
id GetCommandQueue(TVMContext ctx) { - CHECK_EQ(ctx.device_type, kDLMetal); - CHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < queues.size()) + ICHECK_EQ(ctx.device_type, kDLMetal); + ICHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < queues.size()) << "Invalid Metal device_id=" << ctx.device_id; return queues[ctx.device_id]; } // Get device for given context id GetDevice(TVMContext ctx) { - CHECK_EQ(ctx.device_type, kDLMetal); - CHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < devices.size()) + ICHECK_EQ(ctx.device_type, kDLMetal); + ICHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < devices.size()) << "Invalid Metal device_id=" << ctx.device_id; return devices[ctx.device_id]; } diff --git a/src/runtime/metal/metal_device_api.mm b/src/runtime/metal/metal_device_api.mm index 79007394b18f..0169a4c2ec28 100644 --- a/src/runtime/metal/metal_device_api.mm +++ b/src/runtime/metal/metal_device_api.mm @@ -43,7 +43,7 @@ *rv = int(index < devices.size()); return; } - CHECK_LT(index, devices.size()) << "Invalid device id " << index; + ICHECK_LT(index, devices.size()) << "Invalid device id " << index; switch (kind) { case kMaxThreadsPerBlock: { *rv = static_cast([devices[ctx.device_id] maxThreadsPerThreadgroup].width); @@ -101,11 +101,11 @@ int GetWarpSize(id dev) { id lib = [dev newLibraryWithSource:[NSString stringWithUTF8String:kDummyKernel] options:nil error:&error_msg]; - CHECK(lib != nil) << [[error_msg localizedDescription] UTF8String]; + ICHECK(lib != nil) << [[error_msg localizedDescription] UTF8String]; id f = [lib newFunctionWithName:[NSString stringWithUTF8String:"CopyKernel"]]; - CHECK(f != nil); + ICHECK(f != nil); id state = [dev newComputePipelineStateWithFunction:f error:&error_msg]; - CHECK(state != nil) << [[error_msg localizedDescription] UTF8String]; + ICHECK(state != nil) << [[error_msg localizedDescription] UTF8String]; return static_cast(state.threadExecutionWidth); } @@ -159,7 +159,7 @@ int GetWarpSize(id dev) { #endif */ id buf = [dev newBufferWithLength:nbytes options:storage_mode]; - CHECK(buf != nil); + ICHECK(buf != nil); return (void*)(CFBridgingRetain(buf)); } @@ -176,7 +176,7 @@ int GetWarpSize(id dev) { TVMContext ctx_to, DLDataType type_hint, TVMStreamHandle stream) { this->Init(); - CHECK(stream == nullptr); + ICHECK(stream == nullptr); TVMContext ctx = ctx_from; if (ctx_from.device_type == kDLCPU) ctx = ctx_to; id queue = GetCommandQueue(ctx); @@ -185,7 +185,7 @@ int GetWarpSize(id dev) { int to_dev_type = static_cast(ctx_to.device_type); if (from_dev_type == kDLMetal && to_dev_type == kDLMetal) { - CHECK_EQ(ctx_from.device_id, ctx_to.device_id) << "Metal disallow cross device copy."; + ICHECK_EQ(ctx_from.device_id, ctx_to.device_id) << "Metal disallow cross device copy."; id encoder = [cb blitCommandEncoder]; [encoder copyFromBuffer:(__bridge id)(from) sourceOffset:from_offset @@ -237,7 +237,7 @@ int GetWarpSize(id dev) { } void MetalWorkspace::StreamSync(TVMContext ctx, TVMStreamHandle stream) { - CHECK(stream == nullptr); + ICHECK(stream == nullptr); // commit an empty command buffer and wait until it completes. 
id queue = GetCommandQueue(ctx); id cb = [queue commandBuffer]; diff --git a/src/runtime/metal/metal_module.mm b/src/runtime/metal/metal_module.mm index 2d3a901c8524..7d46811fe78d 100644 --- a/src/runtime/metal/metal_module.mm +++ b/src/runtime/metal/metal_module.mm @@ -50,7 +50,7 @@ explicit MetalModuleNode(std::string data, std::string fmt, void SaveToFile(const std::string& file_name, const std::string& format) final { std::string fmt = GetFileFormat(file_name, format); - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; std::string meta_file = GetMetaFilePath(file_name); SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, data_); @@ -74,7 +74,7 @@ void SaveToBinary(dmlc::Stream* stream) final { // get a from primary context in device_id id GetPipelineState(size_t device_id, const std::string& func_name) { metal::MetalWorkspace* w = metal::MetalWorkspace::Global(); - CHECK_LT(device_id, w->devices.size()); + ICHECK_LT(device_id, w->devices.size()); // start lock scope. std::lock_guard lock(mutex_); if (finfo_.size() <= device_id) { @@ -118,16 +118,16 @@ void SaveToBinary(dmlc::Stream* stream) final { } id f = [e.lib newFunctionWithName:[NSString stringWithUTF8String:func_name.c_str()]]; - CHECK(f != nil) << "cannot find function " << func_name; + ICHECK(f != nil) << "cannot find function " << func_name; id state = [w->devices[device_id] newComputePipelineStateWithFunction:f error:&err_msg]; - CHECK(state != nil) << "cannot get state:" - << " for function " << func_name - << [[err_msg localizedDescription] UTF8String]; + ICHECK(state != nil) << "cannot get state:" + << " for function " << func_name + << [[err_msg localizedDescription] UTF8String]; // The state.threadExecutionWidth can change dynamically according // to the resource constraints in the kernel, so it does not strictly hold. // Turn off warp-aware optimization for now.
- // CHECK_EQ(state.threadExecutionWidth, w->warp_size[device_id]); + // ICHECK_EQ(state.threadExecutionWidth, w->warp_size[device_id]); e.smap[func_name] = [state retain]; return state; } @@ -231,8 +231,8 @@ void operator()(TVMArgs args, TVMRetValue* rv, const ArgUnion* pack_args) const PackedFunc MetalModuleNode::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { - CHECK_EQ(sptr_to_self.get(), this); - CHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; + ICHECK_EQ(sptr_to_self.get(), this); + ICHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; auto it = fmap_.find(name); if (it == fmap_.end()) return PackedFunc(); const FunctionInfo& info = it->second; diff --git a/src/runtime/micro/micro_session.cc b/src/runtime/micro/micro_session.cc index 1b12d8f341ff..ea7682d3de57 100644 --- a/src/runtime/micro/micro_session.cc +++ b/src/runtime/micro/micro_session.cc @@ -23,10 +23,10 @@ #include "micro_session.h" -#include #include #include #include +#include #include #include @@ -106,7 +106,7 @@ class MicroTransportChannel : public RPCChannel { int unframer_error = unframer_.Write((const uint8_t*)pending_chunk_.data(), pending_chunk_.size(), &bytes_consumed); - CHECK(bytes_consumed <= pending_chunk_.size()) + ICHECK(bytes_consumed <= pending_chunk_.size()) << "consumed " << bytes_consumed << " want <= " << pending_chunk_.size(); pending_chunk_ = pending_chunk_.substr(bytes_consumed); bytes_received += bytes_consumed; @@ -138,7 +138,7 @@ class MicroTransportChannel : public RPCChannel { } bool StartSession() { - CHECK(state_ == State::kReset) + ICHECK(state_ == State::kReset) << "MicroSession: state_: expected kReset, got " << uint8_t(state_); ::std::chrono::steady_clock::time_point start_time = ::std::chrono::steady_clock::now(); @@ -151,8 +151,8 @@ class MicroTransportChannel : public RPCChannel { end_time = session_start_end_time; } while (!session_.IsEstablished()) { - CHECK_EQ(kTvmErrorNoError, session_.Initialize()); - CHECK_EQ(kTvmErrorNoError, session_.StartSession()); + ICHECK_EQ(kTvmErrorNoError, session_.Initialize()); + ICHECK_EQ(kTvmErrorNoError, session_.StartSession()); ::std::chrono::microseconds time_remaining = ::std::max( ::std::chrono::microseconds{0}, ::std::chrono::duration_cast<::std::chrono::microseconds>( @@ -176,7 +176,7 @@ class MicroTransportChannel : public RPCChannel { size_t Send(const void* data, size_t size) override { const uint8_t* data_bytes = static_cast(data); tvm_crt_error_t err = session_.SendMessage(MessageType::kNormal, data_bytes, size); - CHECK(err == kTvmErrorNoError) << "SendMessage returned " << err; + ICHECK(err == kTvmErrorNoError) << "SendMessage returned " << err; return size; } @@ -191,7 +191,7 @@ class MicroTransportChannel : public RPCChannel { session_.ClearReceiveBuffer(); } if (num_bytes_recv == size) { - CHECK(message_buffer_ == nullptr || message_buffer_->ReadAvailable() > 0); + ICHECK(message_buffer_ == nullptr || message_buffer_->ReadAvailable() > 0); return num_bytes_recv; } } @@ -256,7 +256,7 @@ class MicroTransportChannel : public RPCChannel { return; } - CHECK_EQ(buf->Read(message, sizeof(message) - 1), message_size_bytes); + ICHECK_EQ(buf->Read(message, sizeof(message) - 1), message_size_bytes); message[message_size_bytes] = 0; LOG(INFO) << "remote: " << message; session_.ClearReceiveBuffer(); @@ -316,5 +316,5 @@ void TVMLogf(const char* fmt, ...) 
{ LOG(INFO) << msg_buf; } -void TVMPlatformAbort(int error_code) { CHECK(false) << "TVMPlatformAbort: " << error_code; } +void TVMPlatformAbort(int error_code) { ICHECK(false) << "TVMPlatformAbort: " << error_code; } } diff --git a/src/runtime/minrpc/minrpc_server.h b/src/runtime/minrpc/minrpc_server.h index 565f92ad59be..62f7236b8e2a 100644 --- a/src/runtime/minrpc/minrpc_server.h +++ b/src/runtime/minrpc/minrpc_server.h @@ -46,7 +46,7 @@ #endif #if TVM_MINRPC_ENABLE_LOGGING -#include +#include #endif namespace tvm { diff --git a/src/runtime/module.cc b/src/runtime/module.cc index 8c3fb49ea7e0..ac2b60f8a383 100644 --- a/src/runtime/module.cc +++ b/src/runtime/module.cc @@ -39,7 +39,7 @@ void ModuleNode::Import(Module other) { static const PackedFunc* fimport_ = nullptr; if (fimport_ == nullptr) { fimport_ = runtime::Registry::Get("rpc.ImportRemoteModule"); - CHECK(fimport_ != nullptr); + ICHECK(fimport_ != nullptr); } (*fimport_)(GetRef(this), other); return; @@ -57,7 +57,7 @@ void ModuleNode::Import(Module other) { stack.push_back(next); } } - CHECK(!visited.count(this)) << "Cyclic dependency detected during import"; + ICHECK(!visited.count(this)) << "Cyclic dependency detected during import"; this->imports_.emplace_back(std::move(other)); } @@ -75,13 +75,13 @@ PackedFunc ModuleNode::GetFunction(const std::string& name, bool query_imports) Module Module::LoadFromFile(const std::string& file_name, const std::string& format) { std::string fmt = GetFileFormat(file_name, format); - CHECK(fmt.length() != 0) << "Cannot deduce format of file " << file_name; + ICHECK(fmt.length() != 0) << "Cannot deduce format of file " << file_name; if (fmt == "dll" || fmt == "dylib" || fmt == "dso") { fmt = "so"; } std::string load_f_name = "runtime.module.loadfile_" + fmt; const PackedFunc* f = Registry::Get(load_f_name); - CHECK(f != nullptr) << "Loader of " << format << "(" << load_f_name << ") is not presented."; + ICHECK(f != nullptr) << "Loader of " << format << "(" << load_f_name << ") is not presented."; Module m = (*f)(file_name, format); return m; } @@ -109,8 +109,8 @@ const PackedFunc* ModuleNode::GetFuncFromEnv(const std::string& name) { } if (pf == nullptr) { const PackedFunc* f = Registry::Get(name); - CHECK(f != nullptr) << "Cannot find function " << name - << " in the imported modules or global registry"; + ICHECK(f != nullptr) << "Cannot find function " << name + << " in the imported modules or global registry"; return f; } else { import_cache_.insert(std::make_pair(name, std::make_shared(pf))); diff --git a/src/runtime/ndarray.cc b/src/runtime/ndarray.cc index 9c1eeeb973d6..dae775606a7e 100644 --- a/src/runtime/ndarray.cc +++ b/src/runtime/ndarray.cc @@ -21,10 +21,10 @@ * \file ndarray.cc * \brief NDArray container infratructure. */ -#include #include #include #include +#include #include "runtime_base.h" @@ -39,9 +39,9 @@ namespace tvm { namespace runtime { inline void VerifyDataType(DLDataType dtype) { - CHECK_GE(dtype.lanes, 1); + ICHECK_GE(dtype.lanes, 1); if (dtype.code == kDLFloat) { - CHECK_EQ(dtype.bits % 8, 0); + ICHECK_EQ(dtype.bits % 8, 0); } else { // allow uint1 as a special flag for bool. 
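// Editorial note, not part of the patch: taken together, the early returns below reduce
// the visible branches of VerifyDataType to one predicate, bits is a whole number of
// bytes and a power of two, with uint1 (bool) and 4-bit int as explicit exceptions (the
// elided hunk lines may add more cases). Sketch:
//   bool ok = (dtype.bits == 1 && dtype.code == kDLUInt) ||
//             (dtype.bits == 4 && dtype.code == kDLInt) ||
//             (dtype.bits % 8 == 0 && (dtype.bits & (dtype.bits - 1)) == 0);
//   ICHECK(ok);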
if (dtype.bits == 1 && dtype.code == kDLUInt) return; @@ -53,9 +53,9 @@ inline void VerifyDataType(DLDataType dtype) { else if (dtype.bits == 4 && dtype.code == kDLInt) return; else - CHECK_EQ(dtype.bits % 8, 0); + ICHECK_EQ(dtype.bits % 8, 0); } - CHECK_EQ(dtype.bits & (dtype.bits - 1), 0); + ICHECK_EQ(dtype.bits & (dtype.bits - 1), 0); } inline size_t GetDataAlignment(const DLTensor& arr) { @@ -69,8 +69,8 @@ void ArrayCopyFromBytes(DLTensor* handle, const void* data, size_t nbytes) { cpu_ctx.device_type = kDLCPU; cpu_ctx.device_id = 0; size_t arr_size = GetDataSize(*handle); - CHECK_EQ(arr_size, nbytes) << "ArrayCopyFromBytes: size mismatch"; - CHECK(IsContiguous(*handle)) << "ArrayCopyFromBytes only support contiguous array for now"; + ICHECK_EQ(arr_size, nbytes) << "ArrayCopyFromBytes: size mismatch"; + ICHECK(IsContiguous(*handle)) << "ArrayCopyFromBytes only support contiguous array for now"; DeviceAPI::Get(handle->ctx) ->CopyDataFromTo(data, 0, handle->data, static_cast(handle->byte_offset), nbytes, cpu_ctx, handle->ctx, handle->dtype, nullptr); @@ -83,8 +83,8 @@ void ArrayCopyToBytes(const DLTensor* handle, void* data, size_t nbytes) { cpu_ctx.device_type = kDLCPU; cpu_ctx.device_id = 0; size_t arr_size = GetDataSize(*handle); - CHECK_EQ(arr_size, nbytes) << "ArrayCopyToBytes: size mismatch"; - CHECK(IsContiguous(*handle)) << "ArrayCopyToBytes only support contiguous array for now"; + ICHECK_EQ(arr_size, nbytes) << "ArrayCopyToBytes: size mismatch"; + ICHECK(IsContiguous(*handle)) << "ArrayCopyToBytes only support contiguous array for now"; DeviceAPI::Get(handle->ctx) ->CopyDataFromTo(handle->data, static_cast(handle->byte_offset), data, 0, nbytes, handle->ctx, cpu_ctx, handle->dtype, nullptr); @@ -153,7 +153,7 @@ struct NDArray::Internal { } static DLManagedTensor* ToDLPack(NDArray::Container* from) { - CHECK(from != nullptr); + ICHECK(from != nullptr); DLManagedTensor* ret = new DLManagedTensor(); ret->dl_tensor = from->dl_tensor; ret->manager_ctx = from; @@ -169,13 +169,13 @@ struct NDArray::Internal { }; NDArray NDArray::CreateView(std::vector shape, DLDataType dtype) { - CHECK(data_ != nullptr); - CHECK(get_mutable()->dl_tensor.strides == nullptr) << "Can only create view for compact tensor"; + ICHECK(data_ != nullptr); + ICHECK(get_mutable()->dl_tensor.strides == nullptr) << "Can only create view for compact tensor"; NDArray ret = Internal::Create(shape, dtype, get_mutable()->dl_tensor.ctx); ret.get_mutable()->dl_tensor.byte_offset = this->get_mutable()->dl_tensor.byte_offset; size_t curr_size = GetDataSize(this->get_mutable()->dl_tensor); size_t view_size = GetDataSize(ret.get_mutable()->dl_tensor); - CHECK_LE(view_size, curr_size) + ICHECK_LE(view_size, curr_size) << "Tries to create a view that has bigger memory than current one"; // increase ref count get_mutable()->IncRef(); @@ -211,25 +211,25 @@ NDArray NDArray::FromDLPack(DLManagedTensor* tensor) { } void NDArray::CopyToBytes(void* data, size_t nbytes) const { - CHECK(data != nullptr); - CHECK(data_ != nullptr); + ICHECK(data != nullptr); + ICHECK(data_ != nullptr); ArrayCopyToBytes(&get_mutable()->dl_tensor, data, nbytes); } void NDArray::CopyFromBytes(const void* data, size_t nbytes) { - CHECK(data != nullptr); - CHECK(data_ != nullptr); + ICHECK(data != nullptr); + ICHECK(data_ != nullptr); ArrayCopyFromBytes(&get_mutable()->dl_tensor, data, nbytes); } void NDArray::CopyFromTo(const DLTensor* from, DLTensor* to, TVMStreamHandle stream) { size_t from_size = GetDataSize(*from); size_t to_size = GetDataSize(*to); - 
CHECK_EQ(from_size, to_size) << "TVMArrayCopyFromTo: The size must exactly match"; + ICHECK_EQ(from_size, to_size) << "TVMArrayCopyFromTo: The size must exactly match"; - CHECK(from->ctx.device_type == to->ctx.device_type || from->ctx.device_type == kDLCPU || - to->ctx.device_type == kDLCPU || from->ctx.device_type == kDLCPUPinned || - to->ctx.device_type == kDLCPUPinned) + ICHECK(from->ctx.device_type == to->ctx.device_type || from->ctx.device_type == kDLCPU || + to->ctx.device_type == kDLCPU || from->ctx.device_type == kDLCPUPinned || + to->ctx.device_type == kDLCPUPinned) << "Can not copy across different ctx types directly"; // Use the context that is *not* a cpu context to get the correct device diff --git a/src/runtime/object.cc b/src/runtime/object.cc index dc5f1ceabbae..ad68c70698ea 100644 --- a/src/runtime/object.cc +++ b/src/runtime/object.cc @@ -20,9 +20,9 @@ * \file src/runtime/object.cc * \brief Object type management system. */ -#include #include #include +#include #include #include @@ -70,7 +70,7 @@ class TypeContext { if (child_tindex == parent_tindex) return true; { std::lock_guard lock(mutex_); - CHECK_LT(child_tindex, type_table_.size()); + ICHECK_LT(child_tindex, type_table_.size()); while (child_tindex > parent_tindex) { child_tindex = type_table_[child_tindex].parent_index; } @@ -87,10 +87,10 @@ class TypeContext { return it->second; } // try to allocate from parent's type table. - CHECK_LT(parent_tindex, type_table_.size()) + ICHECK_LT(parent_tindex, type_table_.size()) << " skey= " << skey << "static_index=" << static_tindex; TypeInfo& pinfo = type_table_[parent_tindex]; - CHECK_EQ(pinfo.index, parent_tindex); + ICHECK_EQ(pinfo.index, parent_tindex); // if parent cannot overflow, then this class cannot. if (!pinfo.child_slots_can_overflow) { @@ -104,8 +104,8 @@ class TypeContext { if (static_tindex != TypeIndex::kDynamic) { // statically assigned type allocated_tindex = static_tindex; - CHECK_LT(static_tindex, type_table_.size()); - CHECK_EQ(type_table_[allocated_tindex].allocated_slots, 0U) + ICHECK_LT(static_tindex, type_table_.size()); + ICHECK_EQ(type_table_[allocated_tindex].allocated_slots, 0U) << "Conflicting static index " << static_tindex << " between " << type_table_[allocated_tindex].name << " and " << skey; } else if (pinfo.allocated_slots + num_slots <= pinfo.num_slots) { @@ -114,15 +114,15 @@ class TypeContext { // update parent's state pinfo.allocated_slots += num_slots; } else { - CHECK(pinfo.child_slots_can_overflow) + ICHECK(pinfo.child_slots_can_overflow) << "Reach maximum number of sub-classes for " << pinfo.name; // allocate new entries. allocated_tindex = type_counter_; type_counter_ += num_slots; - CHECK_LE(type_table_.size(), type_counter_); + ICHECK_LE(type_table_.size(), type_counter_); type_table_.resize(type_counter_, TypeInfo()); } - CHECK_GT(allocated_tindex, parent_tindex); + ICHECK_GT(allocated_tindex, parent_tindex); // initialize the slot. 
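
The allocator in this hunk ends by asserting ICHECK_GT(allocated_tindex, parent_tindex), and that ordering is what makes the subtype walk near the top of object.cc terminate: following parent_index strictly decreases the index. A toy model of that walk; the four-entry table is hypothetical:

#include <cassert>
#include <cstdint>
#include <vector>

struct TypeInfo {
  uint32_t parent_index;  // every child index is allocated above its parent
};

// Mirrors the parent-pointer loop in TypeContext: indices strictly decrease
// along parent links, so the walk always stops at or below `parent`.
bool DerivedFrom(const std::vector<TypeInfo>& table, uint32_t child, uint32_t parent) {
  while (child > parent) child = table[child].parent_index;
  return child == parent;
}

int main() {
  // Hypothetical layout: 0: Object, 1: BaseExpr -> 0, 2: PrimExpr -> 1, 3: RelayExpr -> 1
  std::vector<TypeInfo> table = {{0}, {0}, {1}, {1}};
  assert(DerivedFrom(table, 2, 1));   // PrimExpr derives from BaseExpr
  assert(DerivedFrom(table, 3, 0));   // RelayExpr derives from Object
  assert(!DerivedFrom(table, 2, 3));  // siblings are unrelated
}
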
type_table_[allocated_tindex].index = allocated_tindex; type_table_[allocated_tindex].parent_index = parent_tindex; @@ -138,21 +138,21 @@ class TypeContext { std::string TypeIndex2Key(uint32_t tindex) { std::lock_guard lock(mutex_); - CHECK(tindex < type_table_.size() && type_table_[tindex].allocated_slots != 0) + ICHECK(tindex < type_table_.size() && type_table_[tindex].allocated_slots != 0) << "Unknown type index " << tindex; return type_table_[tindex].name; } size_t TypeIndex2KeyHash(uint32_t tindex) { std::lock_guard lock(mutex_); - CHECK(tindex < type_table_.size() && type_table_[tindex].allocated_slots != 0) + ICHECK(tindex < type_table_.size() && type_table_[tindex].allocated_slots != 0) << "Unknown type index " << tindex; return type_table_[tindex].name_hash; } uint32_t TypeKey2Index(const std::string& skey) { auto it = type_key2index_.find(skey); - CHECK(it != type_key2index_.end()) + ICHECK(it != type_key2index_.end()) << "Cannot find type " << skey << ". Did you forget to register the node by TVM_REGISTER_NODE_TYPE ?"; return it->second; @@ -229,7 +229,7 @@ TVM_REGISTER_GLOBAL("runtime.DumpTypeTable").set_body_typed([](int min_child_cou int TVMObjectGetTypeIndex(TVMObjectHandle obj, unsigned* out_tindex) { API_BEGIN(); - CHECK(obj != nullptr); + ICHECK(obj != nullptr); out_tindex[0] = static_cast(obj)->type_index(); API_END(); } diff --git a/src/runtime/opencl/opencl_common.h b/src/runtime/opencl/opencl_common.h index 290f8c256508..fa118ed9525b 100644 --- a/src/runtime/opencl/opencl_common.h +++ b/src/runtime/opencl/opencl_common.h @@ -24,10 +24,10 @@ #ifndef TVM_RUNTIME_OPENCL_OPENCL_COMMON_H_ #define TVM_RUNTIME_OPENCL_OPENCL_COMMON_H_ -#include #include #include #include +#include /* There are many OpenCL platforms that do not yet support OpenCL 2.0, * hence we use 1.2 APIs, some of which are now deprecated. In order @@ -167,7 +167,7 @@ inline const char* CLGetErrorString(cl_int error) { * \param func Expression to call. 
*/ #define OPENCL_CHECK_ERROR(e) \ - { CHECK(e == CL_SUCCESS) << "OpenCL Error, code=" << e << ": " << cl::CLGetErrorString(e); } + { ICHECK(e == CL_SUCCESS) << "OpenCL Error, code=" << e << ": " << cl::CLGetErrorString(e); } #define OPENCL_CALL(func) \ { \ @@ -221,9 +221,9 @@ class OpenCLWorkspace : public DeviceAPI { virtual bool IsOpenCLDevice(TVMContext ctx) { return ctx.device_type == kDLOpenCL; } // get the queue of the context cl_command_queue GetQueue(TVMContext ctx) { - CHECK(IsOpenCLDevice(ctx)); + ICHECK(IsOpenCLDevice(ctx)); this->Init(); - CHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < queues.size()) + ICHECK(ctx.device_id >= 0 && static_cast(ctx.device_id) < queues.size()) << "Invalid OpenCL device_id=" << ctx.device_id; return queues[ctx.device_id]; } diff --git a/src/runtime/opencl/opencl_device_api.cc b/src/runtime/opencl/opencl_device_api.cc index 83944cd4a83e..a3ec21e28f1d 100644 --- a/src/runtime/opencl/opencl_device_api.cc +++ b/src/runtime/opencl/opencl_device_api.cc @@ -47,7 +47,7 @@ void OpenCLWorkspace::GetAttr(TVMContext ctx, DeviceAttrKind kind, TVMRetValue* *rv = static_cast(index < devices.size()); return; } - CHECK_LT(index, devices.size()) << "Invalid device id " << index; + ICHECK_LT(index, devices.size()) << "Invalid device id " << index; switch (kind) { case kExist: break; @@ -119,7 +119,7 @@ void OpenCLWorkspace::GetAttr(TVMContext ctx, DeviceAttrKind kind, TVMRetValue* void* OpenCLWorkspace::AllocDataSpace(TVMContext ctx, size_t size, size_t alignment, DLDataType type_hint) { this->Init(); - CHECK(context != nullptr) << "No OpenCL device"; + ICHECK(context != nullptr) << "No OpenCL device"; cl_int err_code; cl_mem mptr = clCreateBuffer(this->context, CL_MEM_READ_WRITE, size, nullptr, &err_code); OPENCL_CHECK_ERROR(err_code); @@ -140,7 +140,7 @@ void OpenCLWorkspace::CopyDataFromTo(const void* from, size_t from_offset, void* TVMContext ctx_to, DLDataType type_hint, TVMStreamHandle stream) { this->Init(); - CHECK(stream == nullptr); + ICHECK(stream == nullptr); if (IsOpenCLDevice(ctx_from) && IsOpenCLDevice(ctx_to)) { OPENCL_CALL(clEnqueueCopyBuffer(this->GetQueue(ctx_to), static_cast((void*)from), // NOLINT(*) @@ -163,7 +163,7 @@ void OpenCLWorkspace::CopyDataFromTo(const void* from, size_t from_offset, void* } void OpenCLWorkspace::StreamSync(TVMContext ctx, TVMStreamHandle stream) { - CHECK(stream == nullptr); + ICHECK(stream == nullptr); OPENCL_CALL(clFinish(this->GetQueue(ctx))); } @@ -266,7 +266,7 @@ void OpenCLWorkspace::Init(const std::string& type_key, const std::string& devic this->context = clCreateContext(nullptr, this->devices.size(), &(this->devices[0]), nullptr, nullptr, &err_code); OPENCL_CHECK_ERROR(err_code); - CHECK_EQ(this->queues.size(), 0U); + ICHECK_EQ(this->queues.size(), 0U); for (size_t i = 0; i < this->devices.size(); ++i) { cl_device_id did = this->devices[i]; this->queues.push_back(clCreateCommandQueue(this->context, did, 0, &err_code)); diff --git a/src/runtime/opencl/opencl_module.cc b/src/runtime/opencl/opencl_module.cc index 590a446efe64..a4c61e47b376 100644 --- a/src/runtime/opencl/opencl_module.cc +++ b/src/runtime/opencl/opencl_module.cc @@ -50,7 +50,7 @@ class OpenCLWrappedFunc { } // invoke the function with void arguments void operator()(TVMArgs args, TVMRetValue* rv, void** void_args) const { - CHECK(w_->context != nullptr) << "No OpenCL device"; + ICHECK(w_->context != nullptr) << "No OpenCL device"; cl::OpenCLThreadEntry* t = w_->GetThreadEntry(); // get the kernel from thread local kernel table. 
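
OPENCL_CALL evaluates its argument once, stashes the status code, and funnels it through the error check above so every call site reports the failing expression uniformly. A dependency-free sketch of the same wrapper; status_t, kSuccess, and FakeEnqueueCopy stand in for cl_int, CL_SUCCESS, and a real OpenCL entry point, and the do/while(0) keeps the multi-statement body safe inside if/else:

#include <cstdio>
#include <cstdlib>

using status_t = int;             // stands in for cl_int
constexpr status_t kSuccess = 0;  // stands in for CL_SUCCESS

// Evaluate the call exactly once, then route any failure through a single
// reporting path that names the failing expression.
#define DEMO_CALL(func)                                            \
  do {                                                             \
    status_t e = (func);                                           \
    if (e != kSuccess) {                                           \
      std::fprintf(stderr, "Error, code=%d in: %s\n", e, #func);   \
      std::abort();                                                \
    }                                                              \
  } while (0)

status_t FakeEnqueueCopy(int nbytes) { return nbytes >= 0 ? kSuccess : -30; }

int main() {
  DEMO_CALL(FakeEnqueueCopy(64));   // succeeds silently
  DEMO_CALL(FakeEnqueueCopy(-1));   // prints "code=-30" and aborts
}
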
if (entry_.kernel_id >= t->kernel_table.size()) { @@ -116,8 +116,8 @@ cl::OpenCLWorkspace* OpenCLModuleNode::GetGlobalWorkspace() { PackedFunc OpenCLModuleNode::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { - CHECK_EQ(sptr_to_self.get(), this); - CHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; + ICHECK_EQ(sptr_to_self.get(), this); + ICHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; auto it = fmap_.find(name); if (it == fmap_.end()) return PackedFunc(); const FunctionInfo& info = it->second; @@ -125,13 +125,13 @@ PackedFunc OpenCLModuleNode::GetFunction(const std::string& name, std::vector arg_size(info.arg_types.size()); for (size_t i = 0; i < info.arg_types.size(); ++i) { DLDataType t = info.arg_types[i]; - CHECK_EQ(t.lanes, 1U); + ICHECK_EQ(t.lanes, 1U); if (t.code == kTVMOpaqueHandle) { // specially store pointer type size in OpenCL driver arg_size[i] = sizeof(void*); } else { uint32_t bits = t.bits; - CHECK_EQ(bits % 8, 0U); + ICHECK_EQ(bits % 8, 0U); arg_size[i] = bits / 8; } } @@ -142,7 +142,7 @@ PackedFunc OpenCLModuleNode::GetFunction(const std::string& name, void OpenCLModuleNode::SaveToFile(const std::string& file_name, const std::string& format) { std::string fmt = GetFileFormat(file_name, format); - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; std::string meta_file = GetMetaFilePath(file_name); SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, data_); diff --git a/src/runtime/pack_args.h b/src/runtime/pack_args.h index ae9771641b23..45cde22bda08 100644 --- a/src/runtime/pack_args.h +++ b/src/runtime/pack_args.h @@ -119,7 +119,7 @@ enum ArgConvertCode { }; inline ArgConvertCode GetArgConvertCode(DLDataType t) { - CHECK_EQ(t.lanes, 1U) << "Cannot pass vector type argument to devic function for now"; + ICHECK_EQ(t.lanes, 1U) << "Cannot pass vector type argument to devic function for now"; if (t.code == kDLInt) { if (t.bits == 64U) return INT64_TO_INT64; if (t.bits == 32U) return INT64_TO_INT32; @@ -284,7 +284,7 @@ inline size_t NumBufferArgs(const std::vector& arg_types) { } } for (size_t i = base; i < arg_types.size(); ++i) { - CHECK(arg_types[i].code != kTVMOpaqueHandle) << "Device function need to be organized"; + ICHECK(arg_types[i].code != kTVMOpaqueHandle) << "Device function need to be organized"; } return base; } diff --git a/src/runtime/registry.cc b/src/runtime/registry.cc index 641532a83927..6e74dc354259 100644 --- a/src/runtime/registry.cc +++ b/src/runtime/registry.cc @@ -21,9 +21,9 @@ * \file registry.cc * \brief The global registry of packed function. 
*/ -#include #include #include +#include #include #include @@ -65,7 +65,7 @@ Registry& Registry::Register(const std::string& name, bool can_override) { // N Manager* m = Manager::Global(); std::lock_guard lock(m->mutex); if (m->fmap.count(name)) { - CHECK(can_override) << "Global PackedFunc " << name << " is already registered"; + ICHECK(can_override) << "Global PackedFunc " << name << " is already registered"; } Registry* r = new Registry(); diff --git a/src/runtime/rocm/rocm_common.h b/src/runtime/rocm/rocm_common.h index 6ed9bccb1ab7..b258e37508df 100644 --- a/src/runtime/rocm/rocm_common.h +++ b/src/runtime/rocm/rocm_common.h @@ -43,10 +43,10 @@ namespace runtime { } \ } -#define ROCM_CALL(func) \ - { \ - hipError_t e = (func); \ - CHECK(e == hipSuccess) << "ROCM HIP: " << hipGetErrorString(e); \ +#define ROCM_CALL(func) \ + { \ + hipError_t e = (func); \ + ICHECK(e == hipSuccess) << "ROCM HIP: " << hipGetErrorString(e); \ } /*! \brief Thread local workspace */ diff --git a/src/runtime/rocm/rocm_device_api.cc b/src/runtime/rocm/rocm_device_api.cc index 7f5bc99380a4..26e44eca0d12 100644 --- a/src/runtime/rocm/rocm_device_api.cc +++ b/src/runtime/rocm/rocm_device_api.cc @@ -21,12 +21,12 @@ * \file rocm_device_api.cc * \brief GPU specific API */ -#include #include #include #include #include #include +#include #include "rocm_common.h" @@ -122,7 +122,7 @@ class ROCMDeviceAPI final : public DeviceAPI { void* AllocDataSpace(TVMContext ctx, size_t nbytes, size_t alignment, DLDataType type_hint) final { ROCM_CALL(hipSetDevice(ctx.device_id)); - CHECK_EQ(256 % alignment, 0U) << "ROCM space is aligned at 256 bytes"; + ICHECK_EQ(256 % alignment, 0U) << "ROCM space is aligned at 256 bytes"; void* ret; ROCM_CALL(hipMalloc(&ret, nbytes)); return ret; diff --git a/src/runtime/rocm/rocm_module.cc b/src/runtime/rocm/rocm_module.cc index 8a83599c644b..567557c56794 100644 --- a/src/runtime/rocm/rocm_module.cc +++ b/src/runtime/rocm/rocm_module.cc @@ -70,7 +70,7 @@ class ROCMModuleNode : public runtime::ModuleNode { std::string fmt = GetFileFormat(file_name, format); std::string meta_file = GetMetaFilePath(file_name); // note: llvm and asm formats are not laodable, so we don't save them - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, data_); } @@ -121,7 +121,7 @@ class ROCMModuleNode : public runtime::ModuleNode { size_t nbytes = 0; ROCM_DRIVER_CALL(hipModuleGetGlobal(&global, &nbytes, module_[device_id], global_name.c_str())); - CHECK_EQ(nbytes, expect_nbytes); + ICHECK_EQ(nbytes, expect_nbytes); return global; } @@ -189,8 +189,8 @@ class ROCMWrappedFunc { PackedFunc ROCMModuleNode::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { - CHECK_EQ(sptr_to_self.get(), this); - CHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; + ICHECK_EQ(sptr_to_self.get(), this); + ICHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; auto it = fmap_.find(name); if (it == fmap_.end()) return PackedFunc(); const FunctionInfo& info = it->second; diff --git a/src/runtime/rpc/rpc_device_api.cc b/src/runtime/rpc/rpc_device_api.cc index 196a97ecbd66..943990fd9585 100644 --- a/src/runtime/rpc/rpc_device_api.cc +++ b/src/runtime/rpc/rpc_device_api.cc @@ -20,9 +20,9 @@ /*! 
* \file rpc_device_api.cc */ -#include #include #include +#include #include @@ -71,7 +71,7 @@ class RPCDeviceAPI final : public DeviceAPI { int from_dev_type = ctx_from.device_type; int to_dev_type = ctx_to.device_type; if (from_dev_type > kRPCSessMask && to_dev_type > kRPCSessMask) { - CHECK(ctx_from.device_type == ctx_to.device_type) + ICHECK(ctx_from.device_type == ctx_to.device_type) << "Cannot copy across two different remote session"; auto remote_ctx_from = RemoveSessMask(ctx_from); auto remote_ctx_to = RemoveSessMask(ctx_to); @@ -104,7 +104,7 @@ class RPCDeviceAPI final : public DeviceAPI { private: std::shared_ptr GetSess(TVMContext ctx) { int dev_type = ctx.device_type; - CHECK_GE(dev_type, kRPCSessMask); + ICHECK_GE(dev_type, kRPCSessMask); int tbl_index = dev_type / kRPCSessMask - 1; return RPCSession::Get(tbl_index); } diff --git a/src/runtime/rpc/rpc_endpoint.cc b/src/runtime/rpc/rpc_endpoint.cc index 2deae07b0315..0f526007f49e 100644 --- a/src/runtime/rpc/rpc_endpoint.cc +++ b/src/runtime/rpc/rpc_endpoint.cc @@ -122,7 +122,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { break; case kRecvPacketNumBytes: { uint64_t packet_nbytes; - CHECK(this->Read(&packet_nbytes)); + ICHECK(this->Read(&packet_nbytes)); if (packet_nbytes != 0) { this->SwitchToState(kProcessPacket); this->RequestBytes(packet_nbytes); @@ -178,7 +178,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { << args[i].AsObjectRef()->GetTypeKey() << " is not supported by RPC"; } else if (tcode == kTVMContext) { DLContext ctx = args[i]; - CHECK_LT(static_cast(ctx.device_type), kRPCSessMask) + ICHECK_LT(static_cast(ctx.device_type), kRPCSessMask) << "InternalError: cannot pass RPC context in the channel"; } } @@ -254,7 +254,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { void SwitchToState(State state) { // invariant if (state != kCopyAckReceived) { - CHECK_EQ(pending_request_bytes_, 0U) << "state=" << state; + ICHECK_EQ(pending_request_bytes_, 0U) << "state=" << state; } // need to actively flush the writer // so the data get pushed out. @@ -262,7 +262,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { flush_writer_(); } state_ = state; - CHECK(state != kInitHeader) << "cannot switch to init header"; + ICHECK(state != kInitHeader) << "cannot switch to init header"; if (state == kRecvPacketNumBytes) { this->RequestBytes(sizeof(uint64_t)); // recycle arena for the next session. 
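
The endpoint's Read override just below asserts ICHECK_LE(size, pending_request_bytes_): a state first declares how many bytes it needs via RequestBytes and may then consume at most that budget. A toy model of the discipline, with a std::string standing in for the socket; all names are illustrative:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

class ByteBudgetReader {
 public:
  void RequestBytes(size_t n) { pending_ += n; }
  bool NeedsMoreBytes() const { return pending_ > 0; }
  // Every read must fit inside the previously requested budget.
  void Read(void* out, size_t size, const std::string& wire, size_t* pos) {
    assert(size <= pending_ && "read exceeds the requested byte budget");
    std::memcpy(out, wire.data() + *pos, size);
    *pos += size;
    pending_ -= size;
  }

 private:
  size_t pending_ = 0;
};

int main() {
  uint64_t length_on_wire = 42;
  std::string wire(reinterpret_cast<const char*>(&length_on_wire), sizeof(length_on_wire));

  ByteBudgetReader reader;
  size_t pos = 0;
  reader.RequestBytes(sizeof(uint64_t));  // like state kRecvPacketNumBytes
  uint64_t packet_nbytes = 0;
  reader.Read(&packet_nbytes, sizeof(packet_nbytes), wire, &pos);
  assert(packet_nbytes == 42 && !reader.NeedsMoreBytes());
}
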
@@ -280,7 +280,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { this->RequestBytes(len); return; } else { - CHECK_EQ(init_header_step_, 1); + ICHECK_EQ(init_header_step_, 1); this->ReadArray(dmlc::BeginPtr(*remote_key_), remote_key_->length()); this->SwitchToState(kRecvPacketNumBytes); } @@ -378,7 +378,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { LOG(FATAL) << "RPCError: Error caught from RPC call:\n" << msg; } - CHECK(setreturn != nullptr) << "fsetreturn not available"; + ICHECK(setreturn != nullptr) << "fsetreturn not available"; setreturn(args); this->SwitchToState(kReturnReceived); @@ -518,10 +518,10 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { TVMArgs args = RecvPackedSeq(); try { - CHECK(serving_session_ == nullptr) << "Server has already been initialized"; + ICHECK(serving_session_ == nullptr) << "Server has already been initialized"; std::string server_protocol_ver = kRPCProtocolVer; - CHECK_EQ(client_protocol_ver, server_protocol_ver) + ICHECK_EQ(client_protocol_ver, server_protocol_ver) << "Server[" << name_ << "]: Client protocol version mismatch with the server " << " server protocol=" << server_protocol_ver << ", client protocol=" << client_protocol_ver; @@ -538,7 +538,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { } auto* fconstructor = Registry::Get(constructor_name); - CHECK(fconstructor != nullptr) << " Cannot find session constructor " << constructor_name; + ICHECK(fconstructor != nullptr) << " Cannot find session constructor " << constructor_name; TVMRetValue con_ret; try { @@ -549,12 +549,12 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { << e.what(); } - CHECK_EQ(con_ret.type_code(), kTVMModuleHandle) + ICHECK_EQ(con_ret.type_code(), kTVMModuleHandle) << "Server[" << name_ << "]:" << " Constructor " << constructor_name << " need to return an RPCModule"; Module mod = con_ret; std::string tkey = mod->type_key(); - CHECK_EQ(tkey, "rpc") << "Constructor " << constructor_name << " to return an RPCModule"; + ICHECK_EQ(tkey, "rpc") << "Constructor " << constructor_name << " to return an RPCModule"; serving_session_ = RPCModuleGetSession(mod); this->ReturnVoid(); } catch (const std::runtime_error& e) { @@ -606,9 +606,9 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { private: RPCSession* GetServingSession() const { - CHECK(serving_session_ != nullptr) + ICHECK(serving_session_ != nullptr) << "Need to call InitRemoteSession first before any further actions"; - CHECK(!serving_session_->IsAsync() || async_server_mode_) + ICHECK(!serving_session_->IsAsync() || async_server_mode_) << "Cannot host an async session in a non-Event driven server"; return serving_session_.get(); @@ -616,7 +616,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream { // Utility functions // Internal read function, update pending_request_bytes_ size_t Read(void* data, size_t size) final { - CHECK_LE(size, pending_request_bytes_); + ICHECK_LE(size, pending_request_bytes_); reader_->Read(data, size); pending_request_bytes_ -= size; return size; @@ -693,10 +693,10 @@ void RPCEndpoint::Init() { handler_->SendPackedSeq(args.values, args.type_codes, args.num_args, true); code = HandleUntilReturnEvent(true, [rv](TVMArgs args) { - CHECK_EQ(args.size(), 1); + ICHECK_EQ(args.size(), 1); *rv = args[0]; }); - CHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); + ICHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); }); } @@ -739,7 +739,7 @@ void RPCEndpoint::ServerLoop() { (*f)(); } TVMRetValue rv; - 
CHECK(HandleUntilReturnEvent(false, [](TVMArgs) {}) == RPCCode::kShutdown); + ICHECK(HandleUntilReturnEvent(false, [](TVMArgs) {}) == RPCCode::kShutdown); if (const auto* f = Registry::Get("tvm.rpc.server.shutdown")) { (*f)(); } @@ -757,7 +757,7 @@ int RPCEndpoint::ServerAsyncIOEventHandler(const std::string& in_bytes, int even [this](const void* data, size_t size) { return channel_->Send(data, size); }, writer_.bytes_available()); } - CHECK(code != RPCCode::kReturn && code != RPCCode::kCopyAck); + ICHECK(code != RPCCode::kReturn && code != RPCCode::kCopyAck); if (code == RPCCode::kShutdown) return 0; if (writer_.bytes_available() != 0) return 2; return 1; @@ -781,7 +781,7 @@ void RPCEndpoint::InitRemoteSession(TVMArgs args) { handler_->SendPackedSeq(args.values, args.type_codes, args.num_args, true); code = HandleUntilReturnEvent(true, [](TVMArgs args) {}); - CHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); + ICHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); } // Get remote function with name @@ -804,7 +804,7 @@ void RPCEndpoint::CallFunc(RPCSession::PackedFuncHandle h, const TVMValue* arg_v handler_->SendPackedSeq(arg_values, arg_type_codes, num_args, true); code = HandleUntilReturnEvent(true, encode_return); - CHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); + ICHECK(code == RPCCode::kReturn) << "code=" << static_cast(code); } void RPCEndpoint::CopyToRemote(void* from, size_t from_offset, void* to, size_t to_offset, @@ -827,7 +827,7 @@ void RPCEndpoint::CopyToRemote(void* from, size_t from_offset, void* to, size_t handler_->Write(type_hint); handler_->WriteArray(reinterpret_cast(from) + from_offset, data_size); - CHECK(HandleUntilReturnEvent(true, [](TVMArgs) {}) == RPCCode::kReturn); + ICHECK(HandleUntilReturnEvent(true, [](TVMArgs) {}) == RPCCode::kReturn); } void RPCEndpoint::CopyFromRemote(void* from, size_t from_offset, void* to, size_t to_offset, @@ -850,7 +850,7 @@ void RPCEndpoint::CopyFromRemote(void* from, size_t from_offset, void* to, size_ handler_->Write(type_hint); TVMRetValue rv; - CHECK(HandleUntilReturnEvent(true, [](TVMArgs) {}) == RPCCode::kCopyAck); + ICHECK(HandleUntilReturnEvent(true, [](TVMArgs) {}) == RPCCode::kCopyAck); handler_->ReadArray(reinterpret_cast(to) + to_offset, data_size); handler_->FinishCopyAck(); } @@ -917,7 +917,7 @@ void RPCCopyAmongRemote(RPCSession* handler, TVMArgs args, TVMRetValue* rv) { if (ctx.device_type == kDLCPU) { ctx = ctx_to; } else { - CHECK(ctx_to.device_type == kDLCPU || ctx_to.device_type == ctx_from.device_type) + ICHECK(ctx_to.device_type == kDLCPU || ctx_to.device_type == ctx_from.device_type) << "Can not copy across different ctx types directly"; } handler->GetDeviceAPI(ctx)->CopyDataFromTo(from, from_offset, to, to_offset, size, ctx_from, @@ -957,7 +957,7 @@ void RPCEndpoint::EventHandler::HandleSyscall(RPCCode code) { } if (state_ != kWaitForAsyncCallback) { - CHECK_EQ(state_, kRecvPacketNumBytes); + ICHECK_EQ(state_, kRecvPacketNumBytes); } } diff --git a/src/runtime/rpc/rpc_module.cc b/src/runtime/rpc/rpc_module.cc index d1eb89164fb7..a3d888e927ed 100644 --- a/src/runtime/rpc/rpc_module.cc +++ b/src/runtime/rpc/rpc_module.cc @@ -109,7 +109,7 @@ class RPCWrappedFunc : public Object { // remove a remote session mask TVMContext RemoveSessMask(TVMContext ctx) const { int dev_type = ctx.device_type; - CHECK_EQ(dev_type / kRPCSessMask, sess_->table_index() + 1) + ICHECK_EQ(dev_type / kRPCSessMask, sess_->table_index() + 1) << "Can not pass in local context or context with a 
different remote session"; ctx.device_type = static_cast(ctx.device_type % kRPCSessMask); return ctx; @@ -145,7 +145,7 @@ class RPCWrappedFunc : public Object { data->dl_tensor.ctx.device_type = static_cast( static_cast(tensor->ctx.device_type) + kRPCSessMask * (sess_->table_index() + 1)); // check strides. - CHECK(tensor->strides == nullptr); + ICHECK(tensor->strides == nullptr); // setup byteoffset data->dl_tensor.byte_offset = tensor->byte_offset; return ret; @@ -190,7 +190,7 @@ class RPCModuleNode final : public ModuleNode { InitRemoteFunc(&remote_get_time_evaluator_, "runtime.RPCTimeEvaluator"); // Remove session mask because we pass ctx by parts. int dev_type = ctx.device_type; - CHECK_EQ(dev_type / kRPCSessMask, sess_->table_index() + 1) + ICHECK_EQ(dev_type / kRPCSessMask, sess_->table_index() + 1) << "ValueError: Need to pass the matched remote context to RPCModule.GetTimeEvaluator"; ctx.device_type = static_cast(ctx.device_type % kRPCSessMask); @@ -224,7 +224,7 @@ class RPCModuleNode final : public ModuleNode { void InitRemoteFunc(FType* func, const std::string& name) { if (*func != nullptr) return; RPCSession::PackedFuncHandle handle = sess_->GetFunction(name); - CHECK(handle != nullptr) << "Cannot found remote function " << name; + ICHECK(handle != nullptr) << "Cannot found remote function " << name; *func = WrapRemoteFunc(handle); } @@ -253,9 +253,9 @@ void* RPCWrappedFunc::UnwrapRemoteValueToHandle(const TVMArgValue& arg) const { if (arg.type_code() == kTVMModuleHandle) { Module mod = arg; std::string tkey = mod->type_key(); - CHECK_EQ(tkey, "rpc") << "ValueError: Cannot pass a non-RPC module to remote"; + ICHECK_EQ(tkey, "rpc") << "ValueError: Cannot pass a non-RPC module to remote"; auto* rmod = static_cast(mod.operator->()); - CHECK(rmod->sess() == sess_) + ICHECK(rmod->sess() == sess_) << "ValueError: Cannot pass in module into a different remote session"; return rmod->module_handle(); } else { @@ -270,22 +270,22 @@ void RPCWrappedFunc::WrapRemoteReturnToValue(TVMArgs args, TVMRetValue* rv) cons if (tcode == kTVMNullptr) return; if (tcode == kTVMPackedFuncHandle) { - CHECK_EQ(args.size(), 2); + ICHECK_EQ(args.size(), 2); void* handle = args[1]; auto wf = std::make_shared(handle, sess_); *rv = PackedFunc([wf](TVMArgs args, TVMRetValue* rv) { return wf->operator()(args, rv); }); } else if (tcode == kTVMModuleHandle) { - CHECK_EQ(args.size(), 2); + ICHECK_EQ(args.size(), 2); void* handle = args[1]; auto n = make_object(handle, sess_); *rv = Module(n); } else if (tcode == kTVMDLTensorHandle || tcode == kTVMNDArrayHandle) { - CHECK_EQ(args.size(), 3); + ICHECK_EQ(args.size(), 3); DLTensor* tensor = args[1]; void* nd_handle = args[2]; *rv = WrapRemoteNDArray(tensor, nd_handle); } else { - CHECK_EQ(args.size(), 2); + ICHECK_EQ(args.size(), 2); *rv = args[1]; } } @@ -298,7 +298,7 @@ Module CreateRPCSessionModule(std::shared_ptr sess) { std::shared_ptr RPCModuleGetSession(Module mod) { std::string tkey = mod->type_key(); - CHECK_EQ(tkey, "rpc") << "ValueError: Cannot pass a non-RPC module to remote"; + ICHECK_EQ(tkey, "rpc") << "ValueError: Cannot pass a non-RPC module to remote"; auto* rmod = static_cast(mod.operator->()); return rmod->sess(); } @@ -340,11 +340,11 @@ inline void CPUCacheFlush(int begin_index, const TVMArgs& args) { PackedFunc WrapTimeEvaluator(PackedFunc pf, TVMContext ctx, int number, int repeat, int min_repeat_ms, PackedFunc f_preproc) { - CHECK(pf != nullptr); + ICHECK(pf != nullptr); if (static_cast(ctx.device_type) == static_cast(kDLMicroDev)) { auto 
get_micro_time_evaluator = runtime::Registry::Get("micro._GetMicroTimeEvaluator"); - CHECK(get_micro_time_evaluator != nullptr) << "micro backend not enabled"; + ICHECK(get_micro_time_evaluator != nullptr) << "micro backend not enabled"; return (*get_micro_time_evaluator)(pf, ctx, number, repeat); } @@ -414,7 +414,7 @@ TVM_REGISTER_GLOBAL("runtime.RPCTimeEvaluator") PackedFunc f_preproc; if (!f_preproc_name.empty()) { auto* pf_preproc = runtime::Registry::Get(f_preproc_name); - CHECK(pf_preproc != nullptr) + ICHECK(pf_preproc != nullptr) << "Cannot find " << f_preproc_name << " in the global function"; f_preproc = *pf_preproc; } @@ -423,11 +423,11 @@ TVM_REGISTER_GLOBAL("runtime.RPCTimeEvaluator") } } else { auto* pf = runtime::Registry::Get(name); - CHECK(pf != nullptr) << "Cannot find " << name << " in the global function"; + ICHECK(pf != nullptr) << "Cannot find " << name << " in the global function"; PackedFunc f_preproc; if (!f_preproc_name.empty()) { auto* pf_preproc = runtime::Registry::Get(f_preproc_name); - CHECK(pf_preproc != nullptr) + ICHECK(pf_preproc != nullptr) << "Cannot find " << f_preproc_name << " in the global function"; f_preproc = *pf_preproc; } @@ -452,20 +452,20 @@ TVM_REGISTER_GLOBAL("tvm.rpc.server.ModuleGetFunction") // functions to access an RPC module. TVM_REGISTER_GLOBAL("rpc.LoadRemoteModule").set_body_typed([](Module sess, std::string name) { std::string tkey = sess->type_key(); - CHECK_EQ(tkey, "rpc"); + ICHECK_EQ(tkey, "rpc"); return static_cast(sess.operator->())->LoadModule(name); }); TVM_REGISTER_GLOBAL("rpc.ImportRemoteModule").set_body_typed([](Module parent, Module child) { std::string tkey = parent->type_key(); - CHECK_EQ(tkey, "rpc"); + ICHECK_EQ(tkey, "rpc"); static_cast(parent.operator->())->ImportModule(child); }); TVM_REGISTER_GLOBAL("rpc.SessTableIndex").set_body([](TVMArgs args, TVMRetValue* rv) { Module m = args[0]; std::string tkey = m->type_key(); - CHECK_EQ(tkey, "rpc"); + ICHECK_EQ(tkey, "rpc"); *rv = static_cast(m.operator->())->sess()->table_index(); }); diff --git a/src/runtime/rpc/rpc_pipe_impl.cc b/src/runtime/rpc/rpc_pipe_impl.cc index 2f4243574909..6f2f7e22deb4 100644 --- a/src/runtime/rpc/rpc_pipe_impl.cc +++ b/src/runtime/rpc/rpc_pipe_impl.cc @@ -78,8 +78,8 @@ class PipeChannel final : public RPCChannel { Module CreatePipeClient(std::vector cmd) { int parent2child[2]; int child2parent[2]; - CHECK_EQ(pipe(parent2child), 0); - CHECK_EQ(pipe(child2parent), 0); + ICHECK_EQ(pipe(parent2child), 0); + ICHECK_EQ(pipe(child2parent), 0); int parent_read = child2parent[0]; int parent_write = parent2child[1]; diff --git a/src/runtime/rpc/rpc_server_env.cc b/src/runtime/rpc/rpc_server_env.cc index cb25150449a1..7ceb12caaf1f 100644 --- a/src/runtime/rpc/rpc_server_env.cc +++ b/src/runtime/rpc/rpc_server_env.cc @@ -31,7 +31,7 @@ namespace runtime { std::string RPCGetPath(const std::string& name) { // do live lookup everytime as workpath can change. 
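
The time evaluator wrapped here follows a number/repeat/min_repeat_ms policy: each of `repeat` reported costs averages `number` calls, and `number` grows until one measurement is long enough to trust. A simplified sketch, assuming std::chrono timing and plain doubling where the real policy rescales `number` proportionally to the shortfall:

#include <chrono>
#include <cstdio>
#include <functional>
#include <vector>

std::vector<double> TimeEvaluator(const std::function<void()>& f, int number,
                                  int repeat, int min_repeat_ms) {
  std::vector<double> costs_ms;
  for (int i = 0; i < repeat; ++i) {
    while (true) {
      auto begin = std::chrono::steady_clock::now();
      for (int j = 0; j < number; ++j) f();
      auto end = std::chrono::steady_clock::now();
      double elapsed_ms =
          std::chrono::duration<double, std::milli>(end - begin).count();
      if (elapsed_ms >= min_repeat_ms) {
        costs_ms.push_back(elapsed_ms / number);  // average cost per call
        break;
      }
      number *= 2;  // measurement too short to be reliable; grow the batch
    }
  }
  return costs_ms;
}

int main() {
  volatile unsigned sink = 0;
  auto costs = TimeEvaluator([&] { for (unsigned k = 0; k < 10000; ++k) sink = sink + k; },
                             /*number=*/10, /*repeat=*/3, /*min_repeat_ms=*/5);
  for (double c : costs) std::printf("%.6f ms per call\n", c);
}
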
const PackedFunc* f = runtime::Registry::Get("tvm.rpc.server.workpath"); - CHECK(f != nullptr) << "require tvm.rpc.server.workpath"; + ICHECK(f != nullptr) << "require tvm.rpc.server.workpath"; return (*f)(name); } diff --git a/src/runtime/rpc/rpc_session.cc b/src/runtime/rpc/rpc_session.cc index 9e05e5d1628d..f5405f0c2fa0 100644 --- a/src/runtime/rpc/rpc_session.cc +++ b/src/runtime/rpc/rpc_session.cc @@ -108,7 +108,7 @@ class RPCSessTable { } // Get session from table std::shared_ptr Get(int index) { - CHECK(index >= 0 && index < kMaxRPCSession); + ICHECK(index >= 0 && index < kMaxRPCSession); return tbl_[index].lock(); } // Insert session into table. @@ -137,7 +137,7 @@ std::shared_ptr RPCSession::Get(int table_index) { } void RPCSession::InsertToSessionTable(std::shared_ptr sess) { - CHECK_EQ(sess->table_index_, 0); + ICHECK_EQ(sess->table_index_, 0); sess->table_index_ = RPCSessTable::Global()->Insert(sess); } diff --git a/src/runtime/rpc/rpc_socket_impl.cc b/src/runtime/rpc/rpc_socket_impl.cc index 77a743be0de6..4e7fe3196d45 100644 --- a/src/runtime/rpc/rpc_socket_impl.cc +++ b/src/runtime/rpc/rpc_socket_impl.cc @@ -70,17 +70,17 @@ std::shared_ptr RPCConnect(std::string url, int port, std::string k support::TCPSocket sock; support::SockAddr addr(url.c_str(), port); sock.Create(addr.ss_family()); - CHECK(sock.Connect(addr)) << "Connect to " << addr.AsString() << " failed"; + ICHECK(sock.Connect(addr)) << "Connect to " << addr.AsString() << " failed"; // hand shake std::ostringstream os; int code = kRPCMagic; int keylen = static_cast(key.length()); - CHECK_EQ(sock.SendAll(&code, sizeof(code)), sizeof(code)); - CHECK_EQ(sock.SendAll(&keylen, sizeof(keylen)), sizeof(keylen)); + ICHECK_EQ(sock.SendAll(&code, sizeof(code)), sizeof(code)); + ICHECK_EQ(sock.SendAll(&keylen, sizeof(keylen)), sizeof(keylen)); if (keylen != 0) { - CHECK_EQ(sock.SendAll(key.c_str(), keylen), keylen); + ICHECK_EQ(sock.SendAll(key.c_str(), keylen), keylen); } - CHECK_EQ(sock.RecvAll(&code, sizeof(code)), sizeof(code)); + ICHECK_EQ(sock.RecvAll(&code, sizeof(code)), sizeof(code)); if (code == kRPCMagic + 2) { sock.Close(); LOG(FATAL) << "URL " << url << ":" << port << " cannot find server that matches key=" << key; @@ -91,11 +91,11 @@ std::shared_ptr RPCConnect(std::string url, int port, std::string k sock.Close(); LOG(FATAL) << "URL " << url << ":" << port << " is not TVM RPC server"; } - CHECK_EQ(sock.RecvAll(&keylen, sizeof(keylen)), sizeof(keylen)); + ICHECK_EQ(sock.RecvAll(&keylen, sizeof(keylen)), sizeof(keylen)); std::string remote_key; if (keylen != 0) { remote_key.resize(keylen); - CHECK_EQ(sock.RecvAll(&remote_key[0], keylen), keylen); + ICHECK_EQ(sock.RecvAll(&remote_key[0], keylen), keylen); } auto endpt = RPCEndpoint::Create(std::unique_ptr(new SockChannel(sock)), key, remote_key); diff --git a/src/runtime/stackvm/stackvm.cc b/src/runtime/stackvm/stackvm.cc index 042815b3d68b..4a5211e9c829 100644 --- a/src/runtime/stackvm/stackvm.cc +++ b/src/runtime/stackvm/stackvm.cc @@ -360,7 +360,7 @@ void StackVM::Run(State* s) const { } case PUSH_VALUE: { int relpos = code[pc + 1].v_int; - CHECK_LE(relpos, 0); + ICHECK_LE(relpos, 0); stack[sp + 1] = stack[sp + relpos]; sp += 1; pc += 2; @@ -390,7 +390,7 @@ void StackVM::Run(State* s) const { break; } case ASSERT: { - CHECK(stack[sp].v_int64) << str_data[code[pc + 1].v_int]; + ICHECK(stack[sp].v_int64) << str_data[code[pc + 1].v_int]; sp -= 1; pc += 2; break; @@ -417,8 +417,8 @@ void StackVM::Run(State* s) const { } case ASSERT_SP: { int64_t expected = code[pc + 
1].v_int; - CHECK_EQ(sp, expected) << "sp assertion failed, expected=" << expected << " now=" << sp - << ", pc=" << pc; + ICHECK_EQ(sp, expected) << "sp assertion failed, expected=" << expected << " now=" << sp + << ", pc=" << pc; pc += 2; break; } @@ -594,19 +594,19 @@ void StackVM::Run(State* s) const { break; } } - CHECK_GE(sp, alloca_sp) << "touch allocated space"; - CHECK_LT(sp, stack_cap) << "Stack overflow"; + ICHECK_GE(sp, alloca_sp) << "touch allocated space"; + ICHECK_LT(sp, stack_cap) << "Stack overflow"; } } const PackedFunc& StackVM::GetExtern(State* s, int fid) const { - CHECK_LT(static_cast(fid), extern_func_cache_.size()); + ICHECK_LT(static_cast(fid), extern_func_cache_.size()); // allow race write in this, since write is idempotent PackedFunc& f = extern_func_cache_[fid]; if (f == nullptr) { - CHECK(s->mod_ctx != nullptr) << "No local context is set in stackvm"; + ICHECK(s->mod_ctx != nullptr) << "No local context is set in stackvm"; const PackedFunc* pf = s->mod_ctx->GetFuncFromEnv(extern_func_name[fid]); - CHECK(pf != nullptr); + ICHECK(pf != nullptr); f = *pf; } return f; diff --git a/src/runtime/stackvm/stackvm.h b/src/runtime/stackvm/stackvm.h index 09581a6d0b62..e57cb0b03952 100644 --- a/src/runtime/stackvm/stackvm.h +++ b/src/runtime/stackvm/stackvm.h @@ -162,7 +162,7 @@ class StackVM { /*! * \brief Assert condition is true. * \code - * CHECK(stack[sp]) << str_data[code[pc + 1].v_int]; + * ICHECK(stack[sp]) << str_data[code[pc + 1].v_int]; * sp = sp - 1; * \endcode */ @@ -201,7 +201,7 @@ class StackVM { /*! * \brief debug instruction. * \code - * CHECK_EQ(sp, code[pc + 1]).v_int; + * ICHECK_EQ(sp, code[pc + 1]).v_int; * pc += 2; * \code */ @@ -391,7 +391,7 @@ class StackVM { * \return The load opcode */ static OpCode GetLoad(DLDataType t) { - CHECK_EQ(t.lanes, 1U); + ICHECK_EQ(t.lanes, 1U); if (t.code == kTVMOpaqueHandle) return ARRAY_LOAD_HANDLE; if (t.code == kDLInt) { switch (t.bits) { @@ -420,7 +420,7 @@ class StackVM { * \return The load opcode */ static OpCode GetStore(DLDataType t) { - CHECK_EQ(t.lanes, 1U); + ICHECK_EQ(t.lanes, 1U); if (t.code == kTVMOpaqueHandle) return ARRAY_STORE_HANDLE; if (t.code == kDLInt) { switch (t.bits) { diff --git a/src/runtime/stackvm/stackvm_module.cc b/src/runtime/stackvm/stackvm_module.cc index 88c19362a1f8..c815857ac66f 100644 --- a/src/runtime/stackvm/stackvm_module.cc +++ b/src/runtime/stackvm/stackvm_module.cc @@ -71,7 +71,7 @@ class StackVMModuleNode : public runtime::ModuleNode { strm->Write(num_imports); for (runtime::Module im : imports_) { - CHECK_EQ(im->imports().size(), 0U) << "Only support simply one-level hierarchy"; + ICHECK_EQ(im->imports().size(), 0U) << "Only support simply one-level hierarchy"; std::string tkey = im->type_key(); strm->Write(tkey); LOG(INFO) << "save " << tkey; @@ -100,7 +100,7 @@ class StackVMModuleNode : public runtime::ModuleNode { strm->Read(&num_imports); for (uint64_t i = 0; i < num_imports; ++i) { std::string tkey; - CHECK(strm->Read(&tkey)); + ICHECK(strm->Read(&tkey)); std::string loadkey = "runtime.module.loadbinary_"; std::string fkey = loadkey + tkey; const PackedFunc* f = Registry::Get(fkey); @@ -114,7 +114,7 @@ class StackVMModuleNode : public runtime::ModuleNode { loaders += name.substr(loadkey.size()); } } - CHECK(f != nullptr) + ICHECK(f != nullptr) << "Binary was created using " << tkey << " but a loader of that name is not registered. Available loaders are " << loaders << ". 
Perhaps you need to recompile with this runtime enabled."; diff --git a/src/runtime/thread_pool.cc b/src/runtime/thread_pool.cc index bf4133453e7c..9bb00eea1edc 100644 --- a/src/runtime/thread_pool.cc +++ b/src/runtime/thread_pool.cc @@ -21,13 +21,13 @@ * \file thread_pool.cc * \brief Threadpool for multi-threading runtime. */ -#include #include #include #include #include #include #include +#include #if TVM_THREADPOOL_USE_OPENMP #include #endif @@ -189,7 +189,7 @@ class SpscTaskQueue { } const uint32_t head = head_.load(std::memory_order_relaxed); // sanity check if the queue is empty - CHECK(tail_.load(std::memory_order_acquire) != head); + ICHECK(tail_.load(std::memory_order_acquire) != head); *output = buffer_[head]; head_.store((head + 1) % kRingSize, std::memory_order_release); return true; @@ -280,13 +280,13 @@ class ThreadPool { } int Launch(FTVMParallelLambda flambda, void* cdata, int num_task, int need_sync) { ParallelLauncher* launcher = ParallelLauncher::ThreadLocal(); - CHECK(!launcher->is_worker) + ICHECK(!launcher->is_worker) << "Cannot launch parallel job inside worker, consider fuse then parallel"; if (num_task == 0) { num_task = num_workers_used_; } if (need_sync != 0) { - CHECK_LE(num_task, num_workers_used_) + ICHECK_LE(num_task, num_workers_used_) << "Request parallel sync task larger than number of threads used " << " workers=" << num_workers_used_ << " request=" << num_task; } @@ -333,7 +333,7 @@ class ThreadPool { // TODO(tulloch): should we make this configurable via standard APIs? static size_t spin_count = GetSpinCount(); while (queue->Pop(&task, spin_count)) { - CHECK(task.launcher != nullptr); + ICHECK(task.launcher != nullptr); TVMParallelGroupEnv* penv = &(task.launcher->env); void* cdata = task.launcher->cdata; if ((*task.launcher->flambda)(task.task_id, penv, cdata) == 0) { diff --git a/src/runtime/threading_backend.cc b/src/runtime/threading_backend.cc index 019df3e597c9..2527f4799086 100644 --- a/src/runtime/threading_backend.cc +++ b/src/runtime/threading_backend.cc @@ -21,8 +21,8 @@ * \file threading_backend.cc * \brief Native threading backend */ -#include #include +#include #include #include @@ -46,7 +46,7 @@ class ThreadGroup::Impl { public: Impl(int num_workers, std::function worker_callback, bool exclude_worker0) : num_workers_(num_workers) { - CHECK_GE(num_workers, 1) << "Requested a non-positive number of worker threads."; + ICHECK_GE(num_workers, 1) << "Requested a non-positive number of worker threads."; for (int i = exclude_worker0; i < num_workers_; ++i) { threads_.emplace_back([worker_callback, i] { worker_callback(i); }); } @@ -112,7 +112,7 @@ class ThreadGroup::Impl { #endif #endif #if defined(__linux__) || defined(__ANDROID__) - CHECK_GE(sorted_order_.size(), num_workers_); + ICHECK_GE(sorted_order_.size(), num_workers_); for (unsigned i = 0; i < threads_.size(); ++i) { unsigned core_id; diff --git a/src/runtime/vm/bytecode.cc b/src/runtime/vm/bytecode.cc index 78972beb1ed2..f82d708468f7 100644 --- a/src/runtime/vm/bytecode.cc +++ b/src/runtime/vm/bytecode.cc @@ -22,7 +22,6 @@ * \brief The bytecode for Relay virtual machine. */ -#include #include #include diff --git a/src/runtime/vm/executable.cc b/src/runtime/vm/executable.cc index 08e9af61fdc3..eb1707b25aa3 100644 --- a/src/runtime/vm/executable.cc +++ b/src/runtime/vm/executable.cc @@ -43,9 +43,9 @@ namespace tvm { namespace runtime { namespace vm { -#define STREAM_CHECK(val, section) \ - CHECK(val) << "Invalid VM file format in the " << section << " section." 
\ - << "\n"; +#define STREAM_CHECK(val, section) \ + ICHECK(val) << "Invalid VM file format in the " << section << " section." \ + << "\n"; // Helper to serialize a vm instruction. VMInstructionSerializer SerializeInstruction(const Instruction& instr); @@ -527,7 +527,7 @@ void Executable::LoadConstantSection(dmlc::Stream* strm) { // Load the const to device mapping. std::vector const_device_type; STREAM_CHECK(strm->Read(&const_device_type), "constant"); - CHECK_EQ(size, const_device_type.size()); + ICHECK_EQ(size, const_device_type.size()); for (auto dev : const_device_type) { this->const_device_type.push_back(static_cast(dev)); } @@ -545,7 +545,7 @@ void Executable::LoadPrimitiveOpNames(dmlc::Stream* strm) { // `instr_fields`. inline std::vector ExtractFields(const std::vector& instr_fields, Index start, Index cnt) { - CHECK_LE(static_cast(start + cnt), instr_fields.size()); + ICHECK_LE(static_cast(start + cnt), instr_fields.size()); std::vector ret; for (auto i = start; i < start + cnt; i++) { ret.push_back(instr_fields[i]); @@ -765,8 +765,8 @@ void Executable::LoadCodeSection(dmlc::Stream* strm) { VMFunction vm_func = VMFunction(loaded_func.name, loaded_func.params, instructions, loaded_func.register_file_size, loaded_func.params_device_type); auto it = this->global_map.find(loaded_func.name); - CHECK(it != this->global_map.end()); - CHECK_LE(it->second, this->global_map.size()); + ICHECK(it != this->global_map.end()); + ICHECK_LE(it->second, this->global_map.size()); this->functions[it->second] = vm_func; } } @@ -774,14 +774,14 @@ void Executable::LoadCodeSection(dmlc::Stream* strm) { TVM_REGISTER_GLOBAL("runtime.GetNumOfGlobals").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast(mod.operator->()); - CHECK(exec); + ICHECK(exec); *rv = static_cast(exec->global_map.size()); }); TVM_REGISTER_GLOBAL("runtime.GetGlobalFields").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast(mod.operator->()); - CHECK(exec); + ICHECK(exec); int idx = args[1]; std::vector > globals(exec->global_map.begin(), exec->global_map.end()); @@ -789,24 +789,24 @@ TVM_REGISTER_GLOBAL("runtime.GetGlobalFields").set_body([](TVMArgs args, TVMRetV return a.second < b.second; }; std::sort(globals.begin(), globals.end(), comp); - CHECK_LT(idx, globals.size()); + ICHECK_LT(idx, globals.size()); *rv = globals[idx].first; }); TVM_REGISTER_GLOBAL("runtime.GetNumOfPrimitives").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast(mod.operator->()); - CHECK(exec); + ICHECK(exec); *rv = static_cast(exec->primitive_map.size()); }); TVM_REGISTER_GLOBAL("runtime.GetPrimitiveFields").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast(mod.operator->()); - CHECK(exec); + ICHECK(exec); int idx = args[1]; - CHECK_GE(idx, 0); - CHECK_LT(idx, exec->primitive_map.size()); + ICHECK_GE(idx, 0); + ICHECK_LT(idx, exec->primitive_map.size()); for (const auto& it : exec->primitive_map) { if (idx == static_cast(it.second)) { diff --git a/src/runtime/vm/memory_manager.cc b/src/runtime/vm/memory_manager.cc index 4d443d9a26a2..4e480507e71a 100644 --- a/src/runtime/vm/memory_manager.cc +++ b/src/runtime/vm/memory_manager.cc @@ -35,7 +35,7 @@ namespace vm { static void BufferDeleter(Object* obj) { auto* ptr = static_cast(obj); - CHECK(ptr->manager_ctx != nullptr); + ICHECK(ptr->manager_ctx != nullptr); Buffer* buffer = 
reinterpret_cast(ptr->manager_ctx); MemoryManager::GetAllocator(buffer->ctx)->Free(*(buffer)); delete buffer; @@ -59,15 +59,15 @@ void StorageObj::Deleter(Object* obj) { } inline void VerifyDataType(DLDataType dtype) { - CHECK_GE(dtype.lanes, 1); + ICHECK_GE(dtype.lanes, 1); if (dtype.code == kDLFloat) { - CHECK_EQ(dtype.bits % 8, 0); + ICHECK_EQ(dtype.bits % 8, 0); } else { // allow uint1 as a special flag for bool. if (dtype.bits == 1 && dtype.code == kDLUInt) return; - CHECK_EQ(dtype.bits % 8, 0); + ICHECK_EQ(dtype.bits % 8, 0); } - CHECK_EQ(dtype.bits & (dtype.bits - 1), 0); + ICHECK_EQ(dtype.bits & (dtype.bits - 1), 0); } inline size_t GetDataAlignment(const DLTensor& arr) { @@ -102,7 +102,7 @@ NDArray StorageObj::AllocNDArray(size_t offset, std::vector shape, DLDa NDArray ret(GetObjectPtr(container)); // RAII in effect, now run the check. - CHECK(offset + needed_size <= this->buffer.size) + ICHECK(offset + needed_size <= this->buffer.size) << "storage allocation failure, attempted to allocate " << needed_size << " at offset " << offset << " in region that is " << this->buffer.size << "bytes"; diff --git a/src/runtime/vm/profiler/vm.cc b/src/runtime/vm/profiler/vm.cc index 63001634558e..94d827893b92 100644 --- a/src/runtime/vm/profiler/vm.cc +++ b/src/runtime/vm/profiler/vm.cc @@ -43,7 +43,7 @@ PackedFunc VirtualMachineDebug::GetFunction(const std::string& name, const ObjectPtr& sptr_to_self) { if (name == "get_stat") { return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { - CHECK_EQ(args.size(), 1U); + ICHECK_EQ(args.size(), 1U); std::vector> op_acc_time; for (auto kv : op_durations_) { auto val = @@ -95,7 +95,7 @@ PackedFunc VirtualMachineDebug::GetFunction(const std::string& name, void VirtualMachineDebug::LoadExecutable(const Executable* exec) { VirtualMachine::LoadExecutable(exec); - CHECK(exec_); + ICHECK(exec_); for (auto kv : exec_->primitive_map) { packed_index_map_[kv.second] = kv.first; op_invokes_[kv.second] = 0; @@ -104,17 +104,17 @@ void VirtualMachineDebug::LoadExecutable(const Executable* exec) { void VirtualMachineDebug::InvokePacked(Index packed_index, const PackedFunc& func, Index arg_count, Index output_size, const std::vector& args) { - CHECK(exec_); - CHECK(!ctxs_.empty()) << "Context has not been initialized yet."; + ICHECK(exec_); + ICHECK(!ctxs_.empty()) << "Context has not been initialized yet."; // The device context of any input of the operator is used for // synchronization. - CHECK_GT(arg_count, 0U); + ICHECK_GT(arg_count, 0U); ObjectRef arg = args[0]; while (arg->IsInstance()) { ADT adt = Downcast(arg); arg = adt[0]; } - CHECK(arg->IsInstance()); + ICHECK(arg->IsInstance()); auto nd_array = Downcast(arg); auto ctx = nd_array->ctx; @@ -140,8 +140,8 @@ runtime::Module CreateVirtualMachineDebug(const Executable* exec) { TVM_REGISTER_GLOBAL("runtime._VirtualMachineDebug").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast(mod.operator->()); - CHECK(exec) << "Virtual machine has not been defined yet." - << "\n"; + ICHECK(exec) << "Virtual machine has not been defined yet." 
+ << "\n"; *rv = CreateVirtualMachineDebug(exec); }); diff --git a/src/runtime/vm/serialize_utils.h b/src/runtime/vm/serialize_utils.h index 726a46ee2fa1..990da31750d4 100644 --- a/src/runtime/vm/serialize_utils.h +++ b/src/runtime/vm/serialize_utils.h @@ -79,8 +79,8 @@ struct VMFunctionSerializer { bool Load(dmlc::Stream* strm) { std::vector func_info; if (!strm->Read(&func_info)) return false; - CHECK_EQ(func_info.size(), 3U) << "Failed to decode the vm function." - << "\n"; + ICHECK_EQ(func_info.size(), 3U) << "Failed to decode the vm function." + << "\n"; name = func_info[0]; register_file_size = std::stoll(func_info[1]); // Get the number of instructions. @@ -135,7 +135,7 @@ struct VMInstructionSerializer { bool Load(dmlc::Stream* strm) { std::vector instr; if (!strm->Read(&instr)) return false; - CHECK_GE(instr.size(), 2U); + ICHECK_GE(instr.size(), 2U); Index loaded_hash = instr[0]; opcode = instr[1]; @@ -144,7 +144,7 @@ struct VMInstructionSerializer { } Index hash = Hash(); - CHECK_EQ(loaded_hash, hash) << "Found mismatch in hash for opcode: " << opcode << "\n"; + ICHECK_EQ(loaded_hash, hash) << "Found mismatch in hash for opcode: " << opcode << "\n"; return true; } diff --git a/src/runtime/vm/vm.cc b/src/runtime/vm/vm.cc index 0a0ff2697674..473b5d759272 100644 --- a/src/runtime/vm/vm.cc +++ b/src/runtime/vm/vm.cc @@ -70,7 +70,7 @@ inline ObjectRef CopyTo(ObjectRef src, const DLContext& ctx) { } return src; } else { - CHECK(src->IsInstance()) + ICHECK(src->IsInstance()) << "VM data must be NDArray or a list of NDArray, but received: " << src->_type_key; std::vector ret; ADT adt = Downcast(src); @@ -93,7 +93,7 @@ std::vector ToShape(NDArray shape_tensor) { // Otherwise we should be rank-1, and we will extract the number of dimensions // for the output vector. 
- CHECK_EQ(rank, 1U) << "shape tensor should be a k-length vector, found " << rank;
+ ICHECK_EQ(rank, 1U) << "shape tensor should be a k-length vector, found " << rank;
int64_t ndim = shape_tensor.Shape().at(0);
shape.resize(ndim);
@@ -115,24 +115,24 @@ PackedFunc VirtualMachine::GetFunction(const std::string& name,
const ObjectPtr& sptr_to_self) {
if (name == "invoke") {
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
- CHECK(exec_) << "The executable is not created yet.";
+ ICHECK(exec_) << "The executable is not created yet.";
std::string func_name = args[0];
auto git = exec_->global_map.find(func_name);
- CHECK(git != exec_->global_map.end())
+ ICHECK(git != exec_->global_map.end())
<< "Cannot find function " << func_name << " in the executable";
auto func = exec_->functions[git->second];
if (func.params.empty()) {
*rv = Invoke(func, {});
} else {
auto it = inputs_.find(func_name);
- CHECK(it != inputs_.end()) << "Input has not been set for function " << func_name;
+ ICHECK(it != inputs_.end()) << "Input has not been set for function " << func_name;
const std::vector& func_args = it->second;
*rv = Invoke(func, func_args);
}
});
} else if (name == "init") {
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
- CHECK_EQ(args.size() % 3, 0);
+ ICHECK_EQ(args.size() % 3, 0);
std::vector contexts;
std::vector alloc_types;
for (int i = 0; i < args.size() / 3; ++i) {
@@ -148,16 +148,16 @@ PackedFunc VirtualMachine::GetFunction(const std::string& name,
});
} else if (name == "set_input") {
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
- CHECK(exec_) << "The executable is not created yet.";
+ ICHECK(exec_) << "The executable is not created yet.";
std::string func_name = args[0];
auto gvit = exec_->global_map.find(func_name);
- CHECK(gvit != exec_->global_map.end()) << "Cannot find function " << func_name;
+ ICHECK(gvit != exec_->global_map.end()) << "Cannot find function " << func_name;
auto func_index = gvit->second;
const auto& vm_func = exec_->functions[func_index];
const auto& param_names = vm_func.params;
- CHECK_EQ(args.size() - 1, param_names.size())
+ ICHECK_EQ(args.size() - 1, param_names.size())
<< "The number of provided parameters doesn't match the number of arguments";
- CHECK_EQ(param_names.size(), vm_func.params_device_type.size())
+ ICHECK_EQ(param_names.size(), vm_func.params_device_type.size())
<< "The number of provided parameters doesn't match the number of assigned devices";
std::vector func_args(param_names.size());
for (int i = 1; i < args.size(); ++i) {
@@ -176,10 +176,10 @@ PackedFunc VirtualMachine::GetFunction(const std::string& name,
}
inline TVMContext VirtualMachine::GetContext(Index device_type) const {
- CHECK_GE(ctxs_.size(), device_type) << "ctxs_ list doesn't contain device:" << device_type;
+ ICHECK_GE(ctxs_.size(), device_type) << "ctxs_ list doesn't contain device:" << device_type;
auto ctx = ctxs_[device_type];
- CHECK_EQ(static_cast(ctx.device_type), device_type)
+ ICHECK_EQ(static_cast(ctx.device_type), device_type)
<< "device type " << device_type << " has not been initialized in the context list.";
return ctx;
}
@@ -190,7 +190,7 @@ void VirtualMachine::PushFrame(Index arg_count, Index ret_pc, const VMFunction&
}
Index VirtualMachine::PopFrame() {
- CHECK_GT(frames_.size(), 0);
+ ICHECK_GT(frames_.size(), 0);
const VMFrame& fr = frames_.back();
func_index_ = fr.func_index;
code_ = fr.code;
@@ -222,9 +222,9 @@ ObjectRef VirtualMachine::Invoke(const VMFunction& func, const std::vector&
args) { - CHECK(exec_) << "The executable has not been created yet."; + ICHECK(exec_) << "The executable has not been created yet."; auto it = exec_->global_map.find(name); - CHECK(it != exec_->global_map.end()) << "Cannot find function " << name << " in the executable"; + ICHECK(it != exec_->global_map.end()) << "Cannot find function " << name << " in the executable"; auto func_index_ = it->second; DLOG(INFO) << "Invoke Global " << name << " at index " << func_index_; return Invoke(exec_->functions[func_index_], args); @@ -263,12 +263,12 @@ void VirtualMachine::InvokePacked(Index packed_index, const PackedFunc& func, In } void VirtualMachine::LoadExecutable(const Executable* exec) { - CHECK(exec) << "The executable is not created yet."; + ICHECK(exec) << "The executable is not created yet."; exec_ = exec; runtime::Module lib = exec_->lib; // Get the list of packed functions. - CHECK(exec->primitive_map.empty() || lib.operator->()) + ICHECK(exec->primitive_map.empty() || lib.operator->()) << "runtime module should have been built for primitive functions" << "\n"; for (const auto& it : exec_->primitive_map) { @@ -278,17 +278,17 @@ void VirtualMachine::LoadExecutable(const Executable* exec) { packed_funcs_.resize(packed_index + 1); } tvm::runtime::PackedFunc pf = lib.GetFunction(packed_name, true); - CHECK(pf != nullptr) << "Cannot find function in module: " << packed_name; + ICHECK(pf != nullptr) << "Cannot find function in module: " << packed_name; packed_funcs_[packed_index] = pf; } for (size_t i = 0; i < packed_funcs_.size(); ++i) { - CHECK(packed_funcs_[i] != nullptr) << "Packed function " << i << " is not initialized"; + ICHECK(packed_funcs_[i] != nullptr) << "Packed function " << i << " is not initialized"; } } void VirtualMachine::Init(const std::vector& ctxs, const std::vector& alloc_types) { - CHECK_EQ(ctxs.size(), alloc_types.size()); + ICHECK_EQ(ctxs.size(), alloc_types.size()); // Cache the context for (size_t i = 0; i < ctxs.size(); i++) { auto dev_type = static_cast(ctxs[i].device_type); @@ -343,8 +343,8 @@ inline int64_t VirtualMachine::LoadScalarInt(Index r) const { } void VirtualMachine::RunLoop() { - CHECK(this->exec_); - CHECK(this->code_); + ICHECK(this->exec_); + ICHECK(this->code_); pc_ = 0; Index frame_start = frames_.size(); while (true) { @@ -398,7 +398,7 @@ void VirtualMachine::RunLoop() { } case Opcode::InvokePacked: { DLOG(INFO) << "InvokedPacked " << instr.packed_index << " arity=" << instr.arity; - CHECK_LE(instr.packed_index, packed_funcs_.size()); + ICHECK_LE(instr.packed_index, packed_funcs_.size()); const auto& func = packed_funcs_[instr.packed_index]; const auto& arity = instr.arity; std::vector args; @@ -456,10 +456,10 @@ void VirtualMachine::RunLoop() { int32_t target_val = LoadScalarInt(instr.if_op.target); if (test_val == target_val) { - CHECK_NE(instr.if_op.true_offset, 0); + ICHECK_NE(instr.if_op.true_offset, 0); pc_ += instr.if_op.true_offset; } else { - CHECK_NE(instr.if_op.false_offset, 0); + ICHECK_NE(instr.if_op.false_offset, 0); pc_ += instr.if_op.false_offset; } @@ -524,10 +524,10 @@ void VirtualMachine::RunLoop() { auto storage_obj = SimpleObjAllocator().make_object(); auto dev_type = instr.alloc_storage.device_type; - CHECK_LT(static_cast(dev_type), allocators_.size()) + ICHECK_LT(static_cast(dev_type), allocators_.size()) << "Memory allocator for device " << dev_type << " has not been initialized"; auto* alloc = allocators_[dev_type]; - CHECK(alloc) << "Did you forget to init the VirtualMachine with contexts?"; + ICHECK(alloc) << "Did you 
forget to init the VirtualMachine with contexts?"; storage_obj->buffer = alloc->Alloc(size, alignment, instr.alloc_storage.dtype_hint); Storage storage(storage_obj); WriteRegister(instr.dst, storage); @@ -569,8 +569,8 @@ void VirtualMachine::RunLoop() { auto shape_obj = ReadRegister(instr.reshape_tensor.newshape); NDArray shape_tensor = Downcast<NDArray>(CopyTo(shape_obj, cpu_ctx)); const DLTensor* dl_tensor = shape_tensor.operator->(); - CHECK_EQ(dl_tensor->dtype.code, 0u); - CHECK_EQ(dl_tensor->dtype.bits, 64); + ICHECK_EQ(dl_tensor->dtype.code, 0u); + ICHECK_EQ(dl_tensor->dtype.bits, 64); int64_t* dims = reinterpret_cast<int64_t*>(dl_tensor->data); int64_t ndim = shape_tensor->shape[0]; std::vector<int64_t> shape(dims, dims + ndim); @@ -584,7 +584,7 @@ void VirtualMachine::RunLoop() { auto tensor_src = ReadRegister(instr.src); NDArray src_data = Downcast<NDArray>(tensor_src); DLContext src_ctx = src_data->ctx; - CHECK_EQ(static_cast<Index>(src_ctx.device_type), instr.src_device_type); + ICHECK_EQ(static_cast<Index>(src_ctx.device_type), instr.src_device_type); DLContext dst_ctx; dst_ctx.device_type = static_cast<DLDeviceType>(instr.dst_device_type); @@ -610,7 +610,7 @@ runtime::Module CreateVirtualMachine(const Executable* exec) { TVM_REGISTER_GLOBAL("runtime._VirtualMachine").set_body([](TVMArgs args, TVMRetValue* rv) { runtime::Module mod = args[0]; const auto* exec = dynamic_cast<const Executable*>(mod.operator->()); - CHECK(exec) << "The virtual machine executable has not been defined yet."; + ICHECK(exec) << "The virtual machine executable has not been defined yet."; *rv = CreateVirtualMachine(exec); }); diff --git a/src/runtime/vulkan/vulkan.cc b/src/runtime/vulkan/vulkan.cc index 3cbe245ed095..cbf1974ee3c7 100644 --- a/src/runtime/vulkan/vulkan.cc +++ b/src/runtime/vulkan/vulkan.cc @@ -202,7 +202,7 @@ class VulkanDeviceAPI final : public DeviceAPI { void CopyDataFromTo(const void* from, size_t from_offset, void* to, size_t to_offset, size_t size, TVMContext ctx_from, TVMContext ctx_to, DLDataType type_hint, TVMStreamHandle stream) final { - CHECK(stream == nullptr); + ICHECK(stream == nullptr); TVMContext ctx = ctx_from; if (ctx_from.device_type == kDLCPU) { ctx = ctx_to; @@ -223,7 +223,7 @@ class VulkanDeviceAPI final : public DeviceAPI { copy_info.size = size; vkCmdCopyBuffer(state->cmd_buffer_, from_buf->buffer, to_buf->buffer, 1, &copy_info); // 2: barrier(transfer-> compute|transfer) - CHECK_EQ(ctx_from.device_id, ctx_to.device_id) << "Vulkan disallow cross device copy."; + ICHECK_EQ(ctx_from.device_id, ctx_to.device_id) << "Vulkan disallows cross-device copy."; VkMemoryBarrier barrier_info; barrier_info.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; barrier_info.pNext = nullptr; @@ -324,7 +324,7 @@ class VulkanDeviceAPI final : public DeviceAPI { } void StreamSync(TVMContext ctx, TVMStreamHandle stream) final { - CHECK(stream == nullptr); + ICHECK(stream == nullptr); VulkanThreadEntry::ThreadLocal()->Stream(ctx.device_id)->Synchronize(); } @@ -347,7 +347,7 @@ class VulkanDeviceAPI final : public DeviceAPI { } const VulkanContext& context(size_t device_id) const { - CHECK_LT(device_id, context_.size()); + ICHECK_LT(device_id, context_.size()); return context_[device_id]; } @@ -363,7 +363,7 @@ void VulkanDeviceAPI::GetAttr(TVMContext ctx, DeviceAttrKind kind, TVMRetValue* *rv = static_cast<int>(index < context_.size()); return; } - CHECK_LT(index, context_.size()) << "Invalid device id " << index; + ICHECK_LT(index, context_.size()) << "Invalid device id " << index; const auto& vctx = context(index); switch (kind) { case kMaxThreadsPerBlock: { @@ -600,7 +600,7 @@ 
VulkanDeviceAPI::VulkanDeviceAPI() { ctx.coherent_staging = ty.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; } } - CHECK_GE(win_rank, 0) << "Cannot find suitable staging memory on device."; + ICHECK_GE(win_rank, 0) << "Cannot find suitable staging memory on device."; win_rank = -1; for (uint32_t k = 0; k < prop.memoryTypeCount; ++k) { @@ -619,7 +619,7 @@ VulkanDeviceAPI::VulkanDeviceAPI() { ctx.compute_mtype_index = k; } } - CHECK_GE(win_rank, 0) << "Cannot find suitable local memory on device."; + ICHECK_GE(win_rank, 0) << "Cannot find suitable local memory on device."; auto has_extension = [&extensions](const char* query) { return std::any_of(extensions.begin(), extensions.end(), [&](const char* extension) { return std::strcmp(query, extension) == 0; }); @@ -740,8 +740,8 @@ class VulkanModuleNode final : public runtime::ModuleNode { const char* type_key() const final { return "vulkan"; } PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) final { - CHECK_EQ(sptr_to_self.get(), this); - CHECK_NE(name, symbol::tvm_module_main) << "Device function do not have main"; + ICHECK_EQ(sptr_to_self.get(), this); + ICHECK_NE(name, symbol::tvm_module_main) << "Device functions do not have main"; auto it = fmap_.find(name); if (it == fmap_.end()) return PackedFunc(); const FunctionInfo& info = it->second; @@ -757,7 +757,7 @@ class VulkanModuleNode final : public runtime::ModuleNode { for (size_t device_id = 0; device_id < ecache_.size(); ++device_id) { for (auto& kv : ecache_[device_id]) { auto& pe = kv.second; - CHECK(pe); + ICHECK(pe); const auto& vctx = VulkanDeviceAPI::Global()->context(device_id); if (pe->descriptor_update_template != VK_NULL_HANDLE) { @@ -786,7 +786,7 @@ class VulkanModuleNode final : public runtime::ModuleNode { { // create shader auto sit = smap_.find(func_name); - CHECK(sit != smap_.end()); + ICHECK(sit != smap_.end()); const std::vector<uint32_t>& data = sit->second.data; VkShaderModuleCreateInfo shader_cinfo; shader_cinfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; @@ -802,7 +802,7 @@ class VulkanModuleNode final : public runtime::ModuleNode { { auto fit = fmap_.find(func_name); - CHECK(fit != fmap_.end()); + ICHECK(fit != fmap_.end()); for (DLDataType arg_type : fit->second.arg_types) { if (arg_type.code == kTVMOpaqueHandle) { { @@ -885,7 +885,7 @@ class VulkanModuleNode final : public runtime::ModuleNode { if (num_pack_args != 0) { playout_cinfo.pushConstantRangeCount = 1; playout_cinfo.pPushConstantRanges = &crange; - CHECK_LE(crange.size, vctx.phy_device_prop.limits.maxPushConstantsSize); + ICHECK_LE(crange.size, vctx.phy_device_prop.limits.maxPushConstantsSize); } else { playout_cinfo.pushConstantRangeCount = 0; playout_cinfo.pPushConstantRanges = nullptr; @@ -932,7 +932,7 @@ class VulkanModuleNode final : public runtime::ModuleNode { void SaveToFile(const std::string& file_name, const std::string& format) final { std::string fmt = GetFileFormat(file_name, format); - CHECK_EQ(fmt, fmt_) << "Can only save to customized format vulkan"; + ICHECK_EQ(fmt, fmt_) << "Can only save to customized format vulkan"; std::string meta_file = GetMetaFilePath(file_name); SaveMetaDataToFile(meta_file, fmap_); std::string data_bin; @@ -1046,7 +1046,7 @@ VulkanStream* VulkanThreadEntry::Stream(size_t device_id) { void VulkanWrappedFunc::operator()(TVMArgs args, TVMRetValue* rv, const ArgUnion* pack_args) const { int device_id = VulkanThreadEntry::ThreadLocal()->ctx.device_id; - CHECK_LT(device_id, kVulkanMaxNumDevice); + ICHECK_LT(device_id, 
kVulkanMaxNumDevice); const auto& vctx = VulkanDeviceAPI::Global()->context(device_id); if (!scache_[device_id]) { scache_[device_id] = m_->GetPipeline(device_id, func_name_, num_pack_args_); } @@ -1067,7 +1067,7 @@ void VulkanWrappedFunc::operator()(TVMArgs args, TVMRetValue* rv, const ArgUnion // Can safely capture by reference as this lambda is immediately executed on the calling thread. VulkanThreadEntry::ThreadLocal()->Stream(device_id)->Launch([&](VulkanStreamState* state) { vkCmdBindPipeline(state->cmd_buffer_, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline->pipeline); - CHECK(pipeline->descriptor_update_template != VK_NULL_HANDLE); + ICHECK(pipeline->descriptor_update_template != VK_NULL_HANDLE); vctx.descriptor_template_khr_functions->vkCmdPushDescriptorSetWithTemplateKHR( state->cmd_buffer_, pipeline->descriptor_update_template, pipeline->pipeline_layout, 0, descriptor_buffers.data()); @@ -1152,7 +1152,7 @@ Module VulkanModuleLoadFile(const std::string& file_name, const std::string& for dmlc::Stream* stream = &fs; uint32_t magic; stream->Read(&magic); - CHECK_EQ(magic, kVulkanModuleMagic) << "VulkanModule Magic mismatch"; + ICHECK_EQ(magic, kVulkanModuleMagic) << "VulkanModule Magic mismatch"; stream->Read(&smap); return VulkanModuleCreate(smap, fmap, ""); } diff --git a/src/runtime/vulkan/vulkan_common.h b/src/runtime/vulkan/vulkan_common.h index 780b11184931..da604f6fa792 100644 --- a/src/runtime/vulkan/vulkan_common.h +++ b/src/runtime/vulkan/vulkan_common.h @@ -18,10 +18,10 @@ */ #pragma once -#include #include #include #include +#include #include #include @@ -80,10 +80,10 @@ inline const char* VKGetErrorString(VkResult error) { * \brief Protected Vulkan call * \param func Expression to call. */ -#define VULKAN_CHECK_ERROR(__e) \ - { \ - CHECK(__e == VK_SUCCESS) << "Vulan Error, code=" << __e << ": " \ - << vulkan::VKGetErrorString(__e); \ +#define VULKAN_CHECK_ERROR(__e) \ + { \ + ICHECK(__e == VK_SUCCESS) << "Vulkan Error, code=" << __e << ": " \ + << vulkan::VKGetErrorString(__e); \ } #define VULKAN_CALL(func) \ diff --git a/src/runtime/vulkan/vulkan_shader.h b/src/runtime/vulkan/vulkan_shader.h index d56ca61e91cb..7558a95ee45e 100644 --- a/src/runtime/vulkan/vulkan_shader.h +++ b/src/runtime/vulkan/vulkan_shader.h @@ -18,10 +18,10 @@ */ #pragma once -#include #include #include #include +#include #include diff --git a/src/runtime/vulkan/vulkan_stream.h b/src/runtime/vulkan/vulkan_stream.h index 388cacc577b0..c5094bdf28db 100644 --- a/src/runtime/vulkan/vulkan_stream.h +++ b/src/runtime/vulkan/vulkan_stream.h @@ -93,7 +93,7 @@ class VulkanStream { void LaunchDeferred(const std::function<void()>& deferred_initializer, const std::function<void()>& deferred_kernel, const VulkanStreamToken& deferred_token) { - CHECK(!vctx_->UseImmediate()); + ICHECK(!vctx_->UseImmediate()); // It is invalid to schedule this instance on the current stream if we already // have a matching descriptor set and a non-matching buffer set. 
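// Context for the mechanical rewrite applied in every hunk of this patch: CHECK* becomes
// ICHECK*, the internal-invariant variant with identical streaming semantics. A minimal
// illustrative sketch of the resulting idiom follows; it is not part of the patch itself,
// and the header name is an assumption based on the include lines this change touches.
#include <tvm/support/logging.h>  // assumed provider of ICHECK at this revision
#include <vector>

inline int FirstPositive(const std::vector<int>& xs) {
  // Like the CHECK it replaces, ICHECK aborts and prints the streamed message
  // when the condition fails; the "I" prefix marks an internal invariant.
  ICHECK(!xs.empty()) << "expected a non-empty vector";
  for (int x : xs) {
    if (x > 0) return x;
  }
  ICHECK(false) << "no positive element among " << xs.size() << " entries";
  return 0;  // unreachable
}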
diff --git a/src/runtime/workspace_pool.cc b/src/runtime/workspace_pool.cc index 49a4c961159d..2d347c32ac10 100644 --- a/src/runtime/workspace_pool.cc +++ b/src/runtime/workspace_pool.cc @@ -95,7 +95,7 @@ class WorkspacePool::Pool { int index = static_cast<int>(allocated_.size()) - 2; for (; index > 0 && allocated_[index].data != data; --index) { } - CHECK_GT(index, 0) << "trying to free things that has not been allocated"; + ICHECK_GT(index, 0) << "trying to free things that have not been allocated"; e = allocated_[index]; allocated_.erase(allocated_.begin() + index); } @@ -115,7 +115,7 @@ class WorkspacePool::Pool { } // Release all resources void Release(TVMContext ctx, DeviceAPI* device) { - CHECK_EQ(allocated_.size(), 1); + ICHECK_EQ(allocated_.size(), 1); for (size_t i = 1; i < free_list_.size(); ++i) { device->FreeDataSpace(ctx, free_list_[i].data); } @@ -160,7 +160,7 @@ void* WorkspacePool::AllocWorkspace(TVMContext ctx, size_t size) { } void WorkspacePool::FreeWorkspace(TVMContext ctx, void* ptr) { - CHECK(static_cast<size_t>(ctx.device_id) < array_.size() && array_[ctx.device_id] != nullptr); + ICHECK(static_cast<size_t>(ctx.device_id) < array_.size() && array_[ctx.device_id] != nullptr); array_[ctx.device_id]->Free(ptr); } diff --git a/src/support/base64.h b/src/support/base64.h index 9849542471c2..901922db8edc 100644 --- a/src/support/base64.h +++ b/src/support/base64.h @@ -26,7 +26,7 @@ #ifndef TVM_SUPPORT_BASE64_H_ #define TVM_SUPPORT_BASE64_H_ -#include +#include #include #include @@ -154,7 +154,7 @@ class Base64InStream : public dmlc::Stream { { // second byte temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; + ICHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; nvalue |= DecodeTable[temp_ch_] << 12; *cptr++ = (nvalue >> 16) & 0xFF; --tlen; @@ -162,13 +162,13 @@ { // third byte temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; + ICHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; // handle termination if (temp_ch_ == '=') { temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ == '=') << "invalid base64 format"; + ICHECK(temp_ch_ == '=') << "invalid base64 format"; temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ == EOF || isspace(temp_ch_)) << "invalid base64 format"; + ICHECK(temp_ch_ == EOF || isspace(temp_ch_)) << "invalid base64 format"; break; } nvalue |= DecodeTable[temp_ch_] << 6; @@ -182,10 +182,10 @@ { // fourth byte temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; + ICHECK(temp_ch_ != EOF && !isspace(temp_ch_)) << "invalid base64 format"; if (temp_ch_ == '=') { temp_ch_ = reader_.GetChar(); - CHECK(temp_ch_ == EOF || isspace(temp_ch_)) << "invalid base64 format"; + ICHECK(temp_ch_ == EOF || isspace(temp_ch_)) << "invalid base64 format"; break; } nvalue |= DecodeTable[temp_ch_]; @@ -200,7 +200,7 @@ temp_ch_ = reader_.GetChar(); } if (kStrictCheck) { - CHECK_EQ(tlen, 0) << "Base64InStream: read incomplete"; + ICHECK_EQ(tlen, 0) << "Base64InStream: read incomplete"; } return size - tlen; } diff --git a/src/support/parallel_for.cc b/src/support/parallel_for.cc index 0b8c810da70b..f4756c29adeb 100644 --- a/src/support/parallel_for.cc +++ b/src/support/parallel_for.cc @@ -21,7 +21,7 @@ * \file parallel_for.cc * \brief An implementation to run loop in parallel. 
*/ -#include +#include #include #include @@ -34,8 +34,8 @@ namespace support { std::vector<std::vector<int>> rr_partitioner(int begin, int end, int step, int num_threads) { int total_task_count = (end - begin) / step; - CHECK_GE(total_task_count, 0) << "Infinite loop condition with begin: " << begin - << " end: " << end << " step: " << step; + ICHECK_GE(total_task_count, 0) << "Infinite loop condition with begin: " << begin + << " end: " << end << " step: " << step; std::vector<std::vector<int>> ret; ret.reserve(num_threads); for (size_t thread = 0; begin < end; begin += step, thread = (thread + 1) % num_threads) { @@ -53,8 +53,8 @@ void parallel_for(int begin, int end, const std::function<void(int)>& f, int ste static std::mutex M_GLOBAL_PARALLEL_FOR_FLAG; { std::unique_lock<std::mutex> l(M_GLOBAL_PARALLEL_FOR_FLAG); - CHECK(!GLOBAL_PARALLEL_FOR_FLAG) << "There's another parallel_for running. Maybe you're " - << "currently inside another parallel_for loop."; + ICHECK(!GLOBAL_PARALLEL_FOR_FLAG) << "There's another parallel_for running. Maybe you're " + << "currently inside another parallel_for loop."; GLOBAL_PARALLEL_FOR_FLAG = true; } @@ -81,7 +81,7 @@ void parallel_for(int begin, int end, const std::function<void(int)>& f, int ste } { std::unique_lock<std::mutex> l(M_GLOBAL_PARALLEL_FOR_FLAG); - CHECK(GLOBAL_PARALLEL_FOR_FLAG); + ICHECK(GLOBAL_PARALLEL_FOR_FLAG); GLOBAL_PARALLEL_FOR_FLAG = false; } try { diff --git a/src/support/pipe.h b/src/support/pipe.h index dcebd0ddf32f..3c1356ba174c 100644 --- a/src/support/pipe.h +++ b/src/support/pipe.h @@ -25,7 +25,7 @@ #define TVM_SUPPORT_PIPE_H_ #include -#include +#include #ifdef _WIN32 #include @@ -64,12 +64,12 @@ class Pipe : public dmlc::Stream { if (size == 0) return 0; #ifdef _WIN32 DWORD nread; - CHECK(ReadFile(handle_, static_cast(ptr), &nread, nullptr)) + ICHECK(ReadFile(handle_, static_cast(ptr), &nread, nullptr)) << "Read Error: " << GetLastError(); #else ssize_t nread; nread = read(handle_, ptr, size); - CHECK_GE(nread, 0) << "Write Error: " << strerror(errno); + ICHECK_GE(nread, 0) << "Read Error: " << strerror(errno); #endif return static_cast<size_t>(nread); } @@ -83,13 +83,13 @@ class Pipe : public dmlc::Stream { if (size == 0) return; #ifdef _WIN32 DWORD nwrite; - CHECK(WriteFile(handle_, static_cast(ptr), &nwrite, nullptr) && - static_cast<size_t>(nwrite) == size) + ICHECK(WriteFile(handle_, static_cast(ptr), &nwrite, nullptr) && + static_cast<size_t>(nwrite) == size) << "Write Error: " << GetLastError(); #else ssize_t nwrite; nwrite = write(handle_, ptr, size); - CHECK_EQ(static_cast<size_t>(nwrite), size) << "Write Error: " << strerror(errno); + ICHECK_EQ(static_cast<size_t>(nwrite), size) << "Write Error: " << strerror(errno); #endif } /*! diff --git a/src/support/ring_buffer.h b/src/support/ring_buffer.h index a3938491f1d1..af814158f7b6 100644 --- a/src/support/ring_buffer.h +++ b/src/support/ring_buffer.h @@ -93,7 +93,7 @@ class RingBuffer { * \param size The number of bytes to read. 
*/ void Read(void* data, size_t size) { - CHECK_GE(bytes_available_, size); + ICHECK_GE(bytes_available_, size); size_t ncopy = std::min(size, ring_.size() - head_ptr_); memcpy(data, &ring_[0] + head_ptr_, ncopy); if (ncopy < size) { @@ -112,7 +112,7 @@ class RingBuffer { template size_t ReadWithCallback(FSend fsend, size_t max_nbytes) { size_t size = std::min(max_nbytes, bytes_available_); - CHECK_NE(size, 0U); + ICHECK_NE(size, 0U); size_t ncopy = std::min(size, ring_.size() - head_ptr_); size_t nsend = fsend(&ring_[0] + head_ptr_, ncopy); bytes_available_ -= nsend; diff --git a/src/support/socket.h b/src/support/socket.h index 571b1503072a..16fba6b58e3d 100644 --- a/src/support/socket.h +++ b/src/support/socket.h @@ -49,7 +49,7 @@ using ssize_t = int; #include #include #endif -#include +#include #include #include @@ -75,7 +75,7 @@ namespace support { inline std::string GetHostName() { std::string buf; buf.resize(256); - CHECK_NE(gethostname(&buf[0], 256), -1); + ICHECK_NE(gethostname(&buf[0], 256), -1); return std::string(buf.c_str()); } @@ -117,7 +117,7 @@ struct SockAddr { size_t sep = url.find(","); std::string host = url.substr(2, sep - 3); std::string port = url.substr(sep + 1, url.length() - 1); - CHECK(ValidateIP(host)) << "Url address is not valid " << url; + ICHECK(ValidateIP(host)) << "Url address is not valid " << url; if (host == "localhost") { host = "127.0.0.1"; } @@ -137,7 +137,7 @@ struct SockAddr { hints.ai_socktype = SOCK_STREAM; addrinfo* res = nullptr; int sig = getaddrinfo(host, nullptr, &hints, &res); - CHECK(sig == 0 && res != nullptr) << "cannot obtain address of " << host; + ICHECK(sig == 0 && res != nullptr) << "cannot obtain address of " << host; switch (res->ai_family) { case AF_INET: { sockaddr_in* addr4 = reinterpret_cast(&addr); @@ -152,7 +152,7 @@ struct SockAddr { addr6->sin6_family = AF_INET6; } break; default: - CHECK(false) << "cannot decode address"; + ICHECK(false) << "cannot decode address"; } freeaddrinfo(res); } @@ -177,7 +177,7 @@ struct SockAddr { const in_addr& addr4 = reinterpret_cast(&addr)->sin_addr; sinx_addr = reinterpret_cast(&addr4); } else { - CHECK(false) << "illegal address"; + ICHECK(false) << "illegal address"; } #ifdef _WIN32 @@ -187,7 +187,7 @@ struct SockAddr { const char* s = inet_ntop(addr.ss_family, sinx_addr, &buf[0], static_cast(buf.length())); #endif - CHECK(s != nullptr) << "cannot decode address"; + ICHECK(s != nullptr) << "cannot decode address"; std::ostringstream os; os << s << ":" << port(); return os.str(); @@ -526,8 +526,8 @@ class TCPSocket : public Socket { */ void SendBytes(std::string data) { int datalen = data.length(); - CHECK_EQ(SendAll(&datalen, sizeof(datalen)), sizeof(datalen)); - CHECK_EQ(SendAll(data.c_str(), datalen), datalen); + ICHECK_EQ(SendAll(&datalen, sizeof(datalen)), sizeof(datalen)); + ICHECK_EQ(SendAll(data.c_str(), datalen), datalen); } /*! * \brief Receive the data to remote. 
@@ -535,10 +535,10 @@ class TCPSocket : public Socket { */ std::string RecvBytes() { int datalen = 0; - CHECK_EQ(RecvAll(&datalen, sizeof(datalen)), sizeof(datalen)); + ICHECK_EQ(RecvAll(&datalen, sizeof(datalen)), sizeof(datalen)); std::string data; data.resize(datalen); - CHECK_EQ(RecvAll(&data[0], datalen), datalen); + ICHECK_EQ(RecvAll(&data[0], datalen), datalen); return data; } }; diff --git a/src/target/build_common.h b/src/target/build_common.h index 9d92697aa319..1816c3ac2650 100644 --- a/src/target/build_common.h +++ b/src/target/build_common.h @@ -44,7 +44,7 @@ inline std::unordered_map<std::string, runtime::FunctionInfo> ExtractFuncInfo(co std::unordered_map<std::string, runtime::FunctionInfo> fmap; for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance<PrimFuncNode>()) << "Can only lower IR Module with PrimFuncs"; + ICHECK(kv.second->IsInstance<PrimFuncNode>()) << "Can only lower IR Module with PrimFuncs"; auto f = Downcast<PrimFunc>(kv.second); runtime::FunctionInfo info; diff --git a/src/target/codegen.cc b/src/target/codegen.cc index 47603e404635..18aa954787ce 100644 --- a/src/target/codegen.cc +++ b/src/target/codegen.cc @@ -55,7 +55,7 @@ runtime::Module Build(IRModule mod, Target target) { } // the build function. const PackedFunc* bf = runtime::Registry::Get(build_f_name); - CHECK(bf != nullptr) << build_f_name << " is not enabled"; + ICHECK(bf != nullptr) << build_f_name << " is not enabled"; return (*bf)(mod, target); } @@ -233,7 +233,7 @@ runtime::Module PackImportsToLLVM(const runtime::Module& mod, bool system_lib, std::string codegen_f_name = "codegen.codegen_blob"; // the codegen function. const PackedFunc* codegen_f = runtime::Registry::Get(codegen_f_name); - CHECK(codegen_f != nullptr) << "codegen.codegen_blob is not presented."; + ICHECK(codegen_f != nullptr) << "codegen.codegen_blob is not present."; return (*codegen_f)(blob_byte_array, system_lib, target_triple); } diff --git a/src/target/datatype/registry.cc b/src/target/datatype/registry.cc index c84f917d5c3e..e7807798741d 100644 --- a/src/target/datatype/registry.cc +++ b/src/target/datatype/registry.cc @@ -49,20 +49,20 @@ Registry* Registry::Global() { } void Registry::Register(const std::string& type_name, uint8_t type_code) { - CHECK(type_code >= DataType::kCustomBegin) + ICHECK(type_code >= DataType::kCustomBegin) << "Please choose a type code >= DataType::kCustomBegin for custom types"; code_to_name_[type_code] = type_name; name_to_code_[type_name] = type_code; } uint8_t Registry::GetTypeCode(const std::string& type_name) { - CHECK(name_to_code_.find(type_name) != name_to_code_.end()) + ICHECK(name_to_code_.find(type_name) != name_to_code_.end()) << "Type name " << type_name << " not registered"; return name_to_code_[type_name]; } std::string Registry::GetTypeName(uint8_t type_code) { - CHECK(code_to_name_.find(type_code) != code_to_name_.end()) + ICHECK(code_to_name_.find(type_code) != code_to_name_.end()) << "Type code " << static_cast<int>(type_code) << " not registered"; return code_to_name_[type_code]; } diff --git a/src/target/generic_func.cc b/src/target/generic_func.cc index b5842eebc9e3..16e5a5f9cdc6 100644 --- a/src/target/generic_func.cc +++ b/src/target/generic_func.cc @@ -68,7 +68,7 @@ void GenericFunc::RegisterGenericFunc(GenericFunc func, const std::string& name) Manager* m = Manager::Global(); std::lock_guard<std::mutex>(m->mutex); auto it = m->fmap.find(name); - CHECK(it == m->fmap.end()) << "GenericFunc already registered " << name; + ICHECK(it == m->fmap.end()) << "GenericFunc already registered " << name; func->name_ = name; m->fmap[name] = func; } @@ -76,7 +76,7 @@ void 
GenericFunc::RegisterGenericFunc(GenericFunc func, const std::string& name) GenericFunc& GenericFunc::set_default(const PackedFunc value, bool allow_override) { auto node = static_cast(operator->()); if (!allow_override) { - CHECK(node->generic_func_ == nullptr) + ICHECK(node->generic_func_ == nullptr) << "Generic function already registered for " << node->name_; } node->generic_func_ = value; @@ -88,7 +88,7 @@ GenericFunc& GenericFunc::register_func(const std::vector& tags, for (auto& t : tags) { if (!allow_override) { auto iter = (*this)->dispatch_dict_.find(t); - CHECK(iter == (*this)->dispatch_dict_.end()) + ICHECK(iter == (*this)->dispatch_dict_.end()) << "Tag " << t << " already registered for schedule factory " << (*this)->name_; } (*this)->dispatch_dict_[t] = value; @@ -112,7 +112,7 @@ void GenericFunc::CallPacked(TVMArgs args, TVMRetValue* ret) const { } if (func == nullptr) { - CHECK(node->generic_func_ != nullptr) << "No generic function registered for " << node->name_; + ICHECK(node->generic_func_ != nullptr) << "No generic function registered for " << node->name_; func = node->generic_func_; } diff --git a/src/target/intrin_rule.cc b/src/target/intrin_rule.cc index fa0ee38d8130..0808d237fc28 100644 --- a/src/target/intrin_rule.cc +++ b/src/target/intrin_rule.cc @@ -81,7 +81,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.rsqrt") .set_body([](const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); auto one = make_const(call->args[0].dtype(), 1); *rv = one / sqrt(call->args[0]); @@ -93,7 +93,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.sigmoid") .set_body([](const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); auto one = make_const(call->args[0].dtype(), 1); *rv = one / (one + exp(-call->args[0])); @@ -103,7 +103,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.isfinite") .set_body([](const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); *rv = isfinite(call->args[0]); }); @@ -111,7 +111,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.isinf") .set_body([](const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); *rv = isinf(call->args[0]); }); @@ -121,7 +121,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.q_multiply_shift") PrimExpr e = args[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); PrimExpr x = call->args[0]; PrimExpr y = call->args[1]; @@ -129,8 +129,8 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.default.q_multiply_shift") PrimExpr s = call->args[3]; // Only int32 types are supported (any number of lanes is allowed) - CHECK(y.dtype().code() == DLDataTypeCode::kDLInt && y.dtype().bits() == 32); - CHECK(s.dtype().code() == DLDataTypeCode::kDLInt && s.dtype().bits() == 32); + ICHECK(y.dtype().code() == DLDataTypeCode::kDLInt && y.dtype().bits() == 32); + ICHECK(s.dtype().code() == DLDataTypeCode::kDLInt && s.dtype().bits() == 32); DataType hp_dtype = DataType::Int(64, x.dtype().lanes()); DataType lp_dtype = DataType::Int(32, x.dtype().lanes()); diff --git a/src/target/intrin_rule.h b/src/target/intrin_rule.h index 359c5b9580b5..69196e1b2c39 100644 --- a/src/target/intrin_rule.h +++ b/src/target/intrin_rule.h @@ -58,13 +58,13 @@ template inline void 
DispatchPureExtern(const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); // Use string based dispatch to extern for backward compact // TODO(tvm-team) replace once the new dispatching system is inplace. const OpNode* op = call->op.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); std::string name = op->name; - CHECK_EQ(name.substr(0, 4), "tir."); + ICHECK_EQ(name.substr(0, 4), "tir."); name = T()(call->dtype, name.substr(4)); if (name.length() != 0) { diff --git a/src/target/llvm/codegen_amdgpu.cc b/src/target/llvm/codegen_amdgpu.cc index 1f6eedde0b21..2890c1ce3e56 100644 --- a/src/target/llvm/codegen_amdgpu.cc +++ b/src/target/llvm/codegen_amdgpu.cc @@ -70,11 +70,11 @@ class CodeGenAMDGPU : public CodeGenLLVM { } void VisitStmt_(const AllocateNode* op) final { - CHECK(!is_zero(op->condition)); + ICHECK(!is_zero(op->condition)); llvm::Value* buf = nullptr; int32_t constant_size = op->constant_allocation_size(); - CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU"; + ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU"; StorageInfo& info = alloc_storage_info_[op->buffer_var.get()]; if (constant_size % 4 == 0 && info.alignment == 0) { @@ -99,7 +99,7 @@ class CodeGenAMDGPU : public CodeGenLLVM { } buf = alloca; } else { - CHECK(info.scope.rank == runtime::StorageRank::kShared) + ICHECK(info.scope.rank == runtime::StorageRank::kShared) << "Can only allocate shared or local memory inside kernel"; // Shared memory: address space == 3 const unsigned shared_address_space = 3; @@ -120,7 +120,7 @@ class CodeGenAMDGPU : public CodeGenLLVM { buf = builder_->CreatePointerCast( buf, DTypeToLLVMType(op->dtype)->getPointerTo(buf->getType()->getPointerAddressSpace())); - CHECK(!var_map_.count(op->buffer_var.get())); + ICHECK(!var_map_.count(op->buffer_var.get())); var_map_[op->buffer_var.get()] = buf; this->VisitStmt(op->body); } @@ -144,7 +144,7 @@ class CodeGenAMDGPU : public CodeGenLLVM { LOG(FATAL) << "unknown workitem idx"; } } else { - CHECK_EQ(ts.rank, 0); + ICHECK_EQ(ts.rank, 0); switch (ts.dim_index) { case 0: intrin_id = ::llvm::Intrinsic::amdgcn_workgroup_id_x; @@ -207,7 +207,7 @@ runtime::Module BuildAMDGPU(IRModule mod, Target target) { cg->Init("TVMAMDGPUModule", tm.get(), ctx.get(), false, false, false); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; + ICHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; auto f = Downcast(kv.second); cg->AddFunction(f); } @@ -249,13 +249,13 @@ runtime::Module BuildAMDGPU(IRModule mod, Target target) { llvm::legacy::PassManager pass; #if TVM_LLVM_VERSION <= 60 - CHECK(tm->addPassesToEmitFile(pass, destObj, llvm::TargetMachine::CGFT_ObjectFile) == 0) + ICHECK(tm->addPassesToEmitFile(pass, destObj, llvm::TargetMachine::CGFT_ObjectFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm->addPassesToEmitFile(pass, destObj, nullptr, llvm::TargetMachine::CGFT_ObjectFile) == 0) + ICHECK(tm->addPassesToEmitFile(pass, destObj, nullptr, llvm::TargetMachine::CGFT_ObjectFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #else - CHECK(tm->addPassesToEmitFile(pass, destObj, nullptr, llvm::CGFT_ObjectFile) == 0) + ICHECK(tm->addPassesToEmitFile(pass, destObj, nullptr, llvm::CGFT_ObjectFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #endif pass.run(*mObj); @@ -263,21 +263,21 @@ 
runtime::Module BuildAMDGPU(IRModule mod, Target target) { llvm::legacy::PassManager passAsm; #if TVM_LLVM_VERSION <= 60 - CHECK(tm->addPassesToEmitFile(passAsm, destAsm, llvm::TargetMachine::CGFT_AssemblyFile) == 0) + ICHECK(tm->addPassesToEmitFile(passAsm, destAsm, llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm->addPassesToEmitFile(passAsm, destAsm, nullptr, - llvm::TargetMachine::CGFT_AssemblyFile) == 0) + ICHECK(tm->addPassesToEmitFile(passAsm, destAsm, nullptr, + llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #else - CHECK(tm->addPassesToEmitFile(passAsm, destAsm, nullptr, llvm::CGFT_AssemblyFile) == 0) + ICHECK(tm->addPassesToEmitFile(passAsm, destAsm, nullptr, llvm::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #endif passAsm.run(*mAsm); std::string assembly(dataAsm.begin(), dataAsm.end()); const auto* f = tvm::runtime::Registry::Get("tvm_callback_rocm_link"); - CHECK(f != nullptr) << "Require tvm_callback_rocm_link to exist, do import tvm.contrib.rocm"; + ICHECK(f != nullptr) << "Require tvm_callback_rocm_link to exist, do import tvm.contrib.rocm"; TVMByteArray arr; arr.data = &obj[0]; diff --git a/src/target/llvm/codegen_arm.cc b/src/target/llvm/codegen_arm.cc index 5e5a94b50064..06f1dfeb1a2d 100644 --- a/src/target/llvm/codegen_arm.cc +++ b/src/target/llvm/codegen_arm.cc @@ -89,7 +89,7 @@ PrimExpr CodeGenARM::ARMPopcount(const CallNode* call) { PrimExpr input8 = reinterpret(uint8_type, e); // Popcount 8bit->8bit const CallNode* c0 = input8.as(); - CHECK(c0 != nullptr); + ICHECK(c0 != nullptr); Array vcnt8_args; vcnt8_args.push_back(IntImm(DataType::UInt(32), ctpop_id)); vcnt8_args.push_back(IntImm(DataType::UInt(32), 1)); diff --git a/src/target/llvm/codegen_cpu.cc b/src/target/llvm/codegen_cpu.cc index d15c6151edc5..fea5f8036678 100644 --- a/src/target/llvm/codegen_cpu.cc +++ b/src/target/llvm/codegen_cpu.cc @@ -119,13 +119,13 @@ void CodeGenCPU::AddFunction(const PrimFunc& f) { CodeGenLLVM::AddFunction(f); if (f_tvm_register_system_symbol_ != nullptr) { auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenLLVM: Expect PrimFunc to have the global_symbol attribute"; export_system_symbols_.emplace_back( std::make_pair(global_symbol.value().operator std::string(), function_)); } else if (target_c_runtime_) { auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenLLVM: Expect PrimFunc to have the global_symbol attribute"; registry_functions_.emplace_back( std::make_pair(global_symbol.value().operator std::string(), function_)); @@ -136,7 +136,7 @@ void CodeGenCPU::AddFunction(const PrimFunc& f) { // Following Glow |DebugInfo::generateFunctionDebugInfo|, https://git.io/fjadv void CodeGenCPU::AddDebugInformation(llvm::Function* function) { #if TVM_LLVM_VERSION >= 50 && TVM_LLVM_VERSION < 70 - CHECK(!function->getSubprogram()); + ICHECK(!function->getSubprogram()); llvm::SmallVector paramTys; llvm::DIType* returnTy = getDebugType(builder_.get(), dbg_info_->di_builder_.get(), function->getReturnType()); @@ -159,9 +159,9 @@ void CodeGenCPU::AddDebugInformation(llvm::Function* function) { true, 0 /* line number */, llvm::DINode::FlagPrototyped, true /* isOptimized */); #endif - CHECK(DIFunction); + ICHECK(DIFunction); function->setSubprogram(DIFunction); - 
CHECK_EQ(function->getSubprogram(), DIFunction); + ICHECK_EQ(function->getSubprogram(), DIFunction); IRBuilder builder(&function->getEntryBlock()); if (!function->getEntryBlock().empty()) { @@ -223,7 +223,7 @@ llvm::DIType* CodeGenCPU::getDebugType(IRBuilder* builder, llvm::DIBuilder* di_b void CodeGenCPU::AddMainFunction(const std::string& entry_func_name) { llvm::Function* f = module_->getFunction(entry_func_name); - CHECK(f) << "Function " << entry_func_name << "does not in module"; + ICHECK(f) << "Function " << entry_func_name << " does not exist in module"; llvm::Type* type = llvm::ArrayType::get(t_char_, entry_func_name.length() + 1); llvm::GlobalVariable* global = new llvm::GlobalVariable(*module_, type, true, llvm::GlobalValue::WeakAnyLinkage, nullptr, @@ -258,7 +258,7 @@ llvm::Value* CodeGenCPU::CreateStructRefPtr(DataType t, llvm::Value* buf, llvm:: if (buf->getType() == t_void_p_) { buf = builder_->CreatePointerCast(buf, t_tvm_array_->getPointerTo()); } else { - CHECK_EQ(buf->getType(), t_tvm_array_->getPointerTo()); + ICHECK_EQ(buf->getType(), t_tvm_array_->getPointerTo()); } } switch (kind) { @@ -296,8 +296,8 @@ llvm::Value* CodeGenCPU::CreateStructRefPtr(DataType t, llvm::Value* buf, llvm:: return builder_->CreateInBoundsGEP(buf, {index, ConstInt32(1), ConstInt32(0)}); } case builtin::kTVMValueContent: { - CHECK_EQ(t.lanes(), 1); - CHECK(t.is_handle() || t.bits() == 64); + ICHECK_EQ(t.lanes(), 1); + ICHECK(t.is_handle() || t.bits() == 64); if (t.is_int()) { buf = builder_->CreatePointerCast(buf, t_int64_->getPointerTo()); return builder_->CreateInBoundsGEP(buf, index); @@ -305,7 +305,7 @@ llvm::Value* CodeGenCPU::CreateStructRefPtr(DataType t, llvm::Value* buf, llvm:: buf = builder_->CreatePointerCast(buf, t_float64_->getPointerTo()); return builder_->CreateInBoundsGEP(buf, index); } else { - CHECK(t.is_handle()); + ICHECK(t.is_handle()); buf = builder_->CreatePointerCast(buf, t_tvm_value_->getPointerTo()); buf = builder_->CreateInBoundsGEP(buf, index); return builder_->CreatePointerCast(buf, t_void_p_->getPointerTo()); @@ -377,7 +377,7 @@ llvm::GlobalVariable* CodeGenCPU::InitContextPtr(llvm::Type* p_type, std::string } llvm::Value* CodeGenCPU::GetContextPtr(llvm::GlobalVariable* gv) { - CHECK(gv != nullptr); + ICHECK(gv != nullptr); #if TVM_LLVM_VERSION >= 110 llvm::LoadInst* faddr = builder_->CreateAlignedLoad(gv, llvm::Align(gv->getAlignment())); #else @@ -496,7 +496,7 @@ llvm::Value* CodeGenCPU::PackClosureData(const Array<Var>& vfields, uint64_t* nu std::vector<llvm::Type*> fields; for (Var v : vfields) { auto it = var_map_.find(v.get()); - CHECK(it != var_map_.end()); + ICHECK(it != var_map_.end()); fields.push_back(it->second->getType()); } llvm::StructType* tcdata = llvm::StructType::create(fields); @@ -563,7 +563,7 @@ void CodeGenCPU::CreateParallelLaunch(const Stmt& body, int num_task) { std::swap(var_map_, new_vmap); std::swap(parallel_env_, par_env); std::swap(function_, f); - CHECK_NE(par_env.parallel_loop_count, 0) << "Cannot find parallel loop within parallel launch"; + ICHECK_NE(par_env.parallel_loop_count, 0) << "Cannot find parallel loop within parallel launch"; builder_->SetInsertPoint(par_launch_end); } @@ -606,7 +606,7 @@ void CodeGenCPU::CreateStaticInit(const std::string& init_fname, const Stmt& bod // setup new variable map, swap it with current var context. 
std::unordered_map new_vmap; UnpackClosureData(cdata, vfields, &new_vmap); - CHECK(parallel_env_.penv == nullptr); + ICHECK(parallel_env_.penv == nullptr); std::swap(function_, f); std::swap(var_map_, new_vmap); this->VisitStmt(body); @@ -697,7 +697,7 @@ llvm::BasicBlock* CodeGenCPU::MakeCallPacked(const Array& args, llvm:: llvm::Value* handle = GetPackedFuncHandle(func_name); // call the function int64_t nargs = end - begin; - CHECK_GE(nargs, 0); + ICHECK_GE(nargs, 0); llvm::Value* stack_value = MakeValue(args[1]); llvm::Value* stack_tcode = MakeValue(args[2]); llvm::Value* arg_value = builder_->CreateInBoundsGEP( @@ -726,7 +726,7 @@ llvm::BasicBlock* CodeGenCPU::MakeCallPacked(const Array& args, llvm:: } llvm::Value* CodeGenCPU::CreateCallPacked(const CallNode* op) { - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); llvm::Value* rvalue = nullptr; llvm::Value* ret_tcode = nullptr; MakeCallPacked(op->args, &rvalue, &ret_tcode, op->dtype, op->args[3].as()->value, @@ -736,7 +736,7 @@ llvm::Value* CodeGenCPU::CreateCallPacked(const CallNode* op) { llvm::Value* CodeGenCPU::CreateCallTracePacked(const CallNode* op) { using llvm::BasicBlock; - CHECK_EQ(op->args.size(), 6U); + ICHECK_EQ(op->args.size(), 6U); llvm::Value* rvalue = nullptr; llvm::Value* ret_tcode = nullptr; BasicBlock* end_block = @@ -793,7 +793,7 @@ llvm::Value* CodeGenCPU::RuntimeTVMParallelBarrier() { void CodeGenCPU::AddStartupFunction() { if (registry_functions_.size() != 0) { - CHECK(is_system_lib_) << "Loading of --system-lib modules is yet to be defined for C runtime"; + ICHECK(is_system_lib_) << "Loading of --system-lib modules is yet to be defined for C runtime"; std::vector symbols; std::vector funcs; for (auto sym : registry_functions_) { @@ -861,7 +861,7 @@ llvm::Value* CodeGenCPU::CreateIntrinsic(const CallNode* op) { builder_->SetInsertPoint(new_bb); return ConstInt32(-1); } else if (op->op.same_as(builtin::tvm_struct_get())) { - CHECK_EQ(op->args.size(), 3U); + ICHECK_EQ(op->args.size(), 3U); int kind = op->args[2].as()->value; llvm::Value* ref = this->CreateStructRefPtr(op->dtype, MakeValue(op->args[0]), MakeValue(op->args[1]), kind); @@ -871,23 +871,23 @@ llvm::Value* CodeGenCPU::CreateIntrinsic(const CallNode* op) { return builder_->CreateLoad(ref); } } else if (op->op.same_as(builtin::tvm_struct_set())) { - CHECK_EQ(op->args.size(), 4U); + ICHECK_EQ(op->args.size(), 4U); int kind = op->args[2].as()->value; llvm::Value* value = MakeValue(op->args[3]); llvm::Value* ref = this->CreateStructRefPtr(op->args[3].dtype(), MakeValue(op->args[0]), MakeValue(op->args[1]), kind); - CHECK(kind != builtin::kArrAddr); + ICHECK(kind != builtin::kArrAddr); if (value->getType()->isPointerTy()) { value = builder_->CreatePointerCast(value, ref->getType()->getPointerElementType()); } builder_->CreateStore(value, ref); return ConstInt32(0); } else if (op->op.same_as(builtin::tvm_stack_alloca())) { - CHECK_EQ(op->args.size(), 2U); + ICHECK_EQ(op->args.size(), 2U); const std::string& type = op->args[0].as()->value; return WithFunctionEntry([&]() -> llvm::AllocaInst* { const int64_t* pval = as_const_int(op->args[1]); - CHECK(pval) << "require stack alloca to contain constant value"; + ICHECK(pval) << "require stack alloca to contain constant value"; llvm::Value* num = ConstInt32(pval[0]); if (type == "shape") { return builder_->CreateAlloca(t_tvm_shape_index_, num); @@ -941,15 +941,15 @@ void CodeGenCPU::VisitStmt_(const AttrStmtNode* op) { this->CreateComputeScope(op); } else if (tir::attr::IsPragmaKey(op->attr_key)) { 
if (op->attr_key == "pragma_parallel_stride_pattern") { - CHECK(parallel_env_.penv != nullptr) + ICHECK(parallel_env_.penv != nullptr) << "Pragma parallel_stride_pattern only valid in parallel launch"; parallel_env_.stride_pattern = true; this->VisitStmt(op->body); } else if (op->attr_key == "pragma_parallel_launch_point") { CreateParallelLaunch(op->body, 0); } else if (op->attr_key == "pragma_parallel_barrier_when_finish") { - CHECK(parallel_env_.penv != nullptr) << "Cannot run barrier without parallel environment"; - CHECK(!parallel_env_.in_parallel_loop) + ICHECK(parallel_env_.penv != nullptr) << "Cannot run barrier without parallel environment"; + ICHECK(!parallel_env_.in_parallel_loop) << "Cannot not place within parallel loop as the workload may differ, " << " place it between parallel and parallel_launch_point"; this->VisitStmt(op->body); @@ -962,7 +962,7 @@ void CodeGenCPU::VisitStmt_(const AttrStmtNode* op) { builder_->CreateCall(bar_callee, {MakeValue(parallel_env_.task_id), parallel_env_.penv}); } else if (op->attr_key == tir::attr::pragma_import_llvm) { const StringImmNode* value = op->value.as(); - CHECK(value != nullptr); + ICHECK(value != nullptr); this->HandleImport(value->value); this->VisitStmt(op->body); } else { @@ -975,7 +975,7 @@ void CodeGenCPU::VisitStmt_(const AttrStmtNode* op) { } void CodeGenCPU::VisitStmt_(const ForNode* op) { - CHECK(is_zero(op->min)); + ICHECK(is_zero(op->min)); if (op->for_type == ForType::Serial || op->for_type == ForType::Unrolled) { CodeGenLLVM::VisitStmt_(op); } else if (op->for_type == ForType::Parallel) { @@ -984,13 +984,13 @@ void CodeGenCPU::VisitStmt_(const ForNode* op) { For(op->loop_var, op->min, op->extent, op->for_type, op->device_api, op->body), 0); } else { // already in parallel env. - CHECK(parallel_env_.task_id.defined()); - CHECK(parallel_env_.num_task.defined()); - CHECK(parallel_env_.penv != nullptr); + ICHECK(parallel_env_.task_id.defined()); + ICHECK(parallel_env_.num_task.defined()); + ICHECK(parallel_env_.penv != nullptr); DataType t = op->extent.dtype(); PrimExpr num_task = cast(t, parallel_env_.num_task); PrimExpr task_id = cast(t, parallel_env_.task_id); - CHECK(!parallel_env_.in_parallel_loop) + ICHECK(!parallel_env_.in_parallel_loop) << "Nested parallel loop is not supported by threadpool, try fuse them instead"; parallel_env_.in_parallel_loop = true; if (parallel_env_.stride_pattern) { diff --git a/src/target/llvm/codegen_hexagon.cc b/src/target/llvm/codegen_hexagon.cc index a7e96c95e07f..c1af2a366a6b 100644 --- a/src/target/llvm/codegen_hexagon.cc +++ b/src/target/llvm/codegen_hexagon.cc @@ -48,7 +48,7 @@ namespace codegen { static std::string get_name(const PrimFunc& f) { auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenLLVM: Expect PrimFunc to have the global_symbol attribute"; return std::string(global_symbol.value()); } @@ -139,9 +139,9 @@ void CodeGenHexagon::InitTarget(llvm::TargetMachine* tm) { if (len_end != npos) { int hvx_bytes = 0; len_begin += std::strlen(hvx_length_feature); - CHECK(!fs.substr(len_begin, len_end - len_begin).getAsInteger(10, hvx_bytes)) + ICHECK(!fs.substr(len_begin, len_end - len_begin).getAsInteger(10, hvx_bytes)) << "invalid HVX length in feature string: " << fs.str(); - CHECK(hvx_bytes == 64 || hvx_bytes == 128) + ICHECK(hvx_bytes == 64 || hvx_bytes == 128) << "invalid HVX vector length: " << hvx_bytes << ", should be 64 or 128"; native_vector_bits_ = hvx_bytes * 8; } @@ -249,7 +249,7 @@ 
llvm::GlobalVariable* CodeGenHexagon::InitContextPtr(llvm::Type* p_type, std::st } llvm::Value* CodeGenHexagon::GetContextPtr(llvm::GlobalVariable* gv) { - CHECK(gv != nullptr); + ICHECK(gv != nullptr); #if TVM_LLVM_VERSION >= 110 llvm::LoadInst* faddr = builder_->CreateAlignedLoad(gv, llvm::Align(gv->getAlignment())); #else @@ -305,7 +305,7 @@ llvm::BasicBlock* CodeGenHexagon::MakeCallPacked(const Array& args, ll llvm::Value* handle = GetPackedFuncHandle(func_name); // call the function int64_t nargs = end - begin; - CHECK_GE(nargs, 0); + ICHECK_GE(nargs, 0); llvm::Value* stack_value = MakeValue(args[1]); llvm::Value* stack_tcode = MakeValue(args[2]); llvm::Value* arg_value = builder_->CreateInBoundsGEP( @@ -416,7 +416,7 @@ llvm::Value* CodeGenHexagon::CreateCallPacked(const CallNode* op) { return ConstInt32(0); } - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); llvm::Value* rvalue = nullptr; llvm::Value* ret_tcode = nullptr; MakeCallPacked(op->args, &rvalue, &ret_tcode, op->dtype, op->args[3].as()->value, @@ -426,7 +426,7 @@ llvm::Value* CodeGenHexagon::CreateCallPacked(const CallNode* op) { llvm::Value* CodeGenHexagon::CreateCallTracePacked(const CallNode* op) { using llvm::BasicBlock; - CHECK_EQ(op->args.size(), 6U); + ICHECK_EQ(op->args.size(), 6U); llvm::Value* rvalue = nullptr; llvm::Value* ret_tcode = nullptr; BasicBlock* end_block = @@ -506,7 +506,7 @@ llvm::Value* CodeGenHexagon::CreateIntrinsic(const CallNode* op) { } else if (op->op.same_as(builtin::tvm_call_trace_packed_lowered())) { return CreateCallTracePacked(op); } else if (op->op.same_as(builtin::tvm_struct_get())) { - CHECK_EQ(op->args.size(), 3); + ICHECK_EQ(op->args.size(), 3); int kind = op->args[2].as()->value; llvm::Value* ref = CreateStructRefPtr(op->dtype, MakeValue(op->args[0]), MakeValue(op->args[1]), kind); @@ -515,9 +515,9 @@ llvm::Value* CodeGenHexagon::CreateIntrinsic(const CallNode* op) { } return builder_->CreateLoad(ref); } else if (op->op.same_as(builtin::tvm_struct_set())) { - CHECK_EQ(op->args.size(), 4); + ICHECK_EQ(op->args.size(), 4); int kind = op->args[2].as()->value; - CHECK(kind != builtin::kArrAddr); + ICHECK(kind != builtin::kArrAddr); llvm::Value* ref = CreateStructRefPtr(op->args[3].dtype(), MakeValue(op->args[0]), MakeValue(op->args[1]), kind); llvm::Value* value = MakeValue(op->args[3]); @@ -527,7 +527,7 @@ llvm::Value* CodeGenHexagon::CreateIntrinsic(const CallNode* op) { builder_->CreateStore(value, ref); return ConstInt32(0); } else if (op->op.same_as(builtin::tvm_stack_alloca())) { - CHECK_EQ(op->args.size(), 2); + ICHECK_EQ(op->args.size(), 2); const std::string& name = op->args[0].as()->value; llvm::Value* size = ConstInt32(op->args[1].as()->value); return builder_->CreateAlloca(types_for_alloca_.at(name), size); @@ -559,7 +559,7 @@ llvm::Value* CodeGenHexagon::CreateStructRefPtr(DataType t, llvm::Value* buf, ll if (buf->getType() == t_void_p_) { buf = builder_->CreatePointerCast(buf, t_tvm_array_->getPointerTo()); } else { - CHECK_EQ(buf->getType(), t_tvm_array_->getPointerTo()); + ICHECK_EQ(buf->getType(), t_tvm_array_->getPointerTo()); } /* The following "kinds" are accessing the members of DLTensor: typedef struct { @@ -605,8 +605,8 @@ llvm::Value* CodeGenHexagon::CreateStructRefPtr(DataType t, llvm::Value* buf, ll TVMContext v_ctx; } TVMValue; */ - CHECK_EQ(t.lanes(), 1); - CHECK(t.is_handle() || t.bits() == 64); + ICHECK_EQ(t.lanes(), 1); + ICHECK(t.is_handle() || t.bits() == 64); if (t.is_int()) { buf = builder_->CreatePointerCast(buf, 
t_int64_->getPointerTo()); return builder_->CreateInBoundsGEP(buf, index); @@ -614,7 +614,7 @@ llvm::Value* CodeGenHexagon::CreateStructRefPtr(DataType t, llvm::Value* buf, ll buf = builder_->CreatePointerCast(buf, t_float64_->getPointerTo()); return builder_->CreateInBoundsGEP(buf, index); } else { - CHECK(t.is_handle()); + ICHECK(t.is_handle()); buf = builder_->CreatePointerCast(buf, t_tvm_value_->getPointerTo()); buf = builder_->CreateInBoundsGEP(buf, index); return builder_->CreatePointerCast(buf, t_void_p_->getPointerTo()); @@ -708,7 +708,7 @@ runtime::Module BuildHexagon(IRModule mod, Target target) { std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext()); cg->Init("TVMHexagonModule", tm.get(), ctx.get(), false, false, false); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance<PrimFuncNode>()) << "Can only lower IR Module with PrimFuncs"; + ICHECK(kv.second->IsInstance<PrimFuncNode>()) << "Can only lower IR Module with PrimFuncs"; auto f = Downcast<PrimFunc>(kv.second); cg->AddFunction(f); } @@ -740,7 +740,7 @@ runtime::Module BuildHexagon(IRModule mod, Target target) { llvm::raw_svector_ostream os(ss); std::unique_ptr<llvm::Module> cm = CloneModule(m); legacy::PassManager pass; - CHECK(tm->addPassesToEmitFile(pass, os, nullptr, ft) == 0) << "Cannot emit target code"; + ICHECK(tm->addPassesToEmitFile(pass, os, nullptr, ft) == 0) << "Cannot emit target code"; pass.run(*cm.get()); out.assign(ss.c_str(), ss.size()); } @@ -752,13 +752,13 @@ runtime::Module BuildHexagon(IRModule mod, Target target) { llvm::SmallString<64> file_name; int fd; std::error_code ec = llvm::sys::fs::createTemporaryFile("tvm", suffix, fd, file_name); - CHECK_EQ(static_cast<bool>(ec), false) << ec.message(); + ICHECK_EQ(static_cast<bool>(ec), false) << ec.message(); llvm::raw_fd_ostream file(fd, true); file << data; - CHECK(!file.has_error()) << file.error().message(); + ICHECK(!file.has_error()) << file.error().message(); // If there is an error, execution will never get here, but return // {ec, name} anyway to allow caller to handle error conditions. - // This way the "CHECK" above can be removed with minimal effort. + // This way the "ICHECK" above can be removed with minimal effort. return std::make_pair(file.error(), std::string(file_name.c_str())); }; @@ -772,12 +772,12 @@ runtime::Module BuildHexagon(IRModule mod, Target target) { so_name += "so"; const auto* f = tvm::runtime::Registry::Get("tvm.contrib.hexagon.link_shared"); - CHECK(f != nullptr) << "tvm.contrib.hexagon.link_shared does not to exist, " - "do import tvm.contrib.hexagon"; + ICHECK(f != nullptr) << "tvm.contrib.hexagon.link_shared does not exist, " "do import tvm.contrib.hexagon"; Array o_names = {StringImm(o_name)}; int rc = (*f)(so_name, o_names); - CHECK(rc == 0) << "Failed to link " << so_name; + ICHECK(rc == 0) << "Failed to link " << so_name; // Move it to ExtractFuncInfo? std::set<std::string> export_abi; diff --git a/src/target/llvm/codegen_llvm.cc b/src/target/llvm/codegen_llvm.cc index 9bc56dc91458..2a7e4644571b 100644 --- a/src/target/llvm/codegen_llvm.cc +++ b/src/target/llvm/codegen_llvm.cc @@ -108,7 +108,7 @@ void CodeGenLLVM::InitFuncState() { void CodeGenLLVM::AddFunctionInternal(const PrimFunc& f, bool ret_void) { this->InitFuncState(); - CHECK_EQ(f->buffer_map.size(), 0U) + ICHECK_EQ(f->buffer_map.size(), 0U) << "Cannot codegen function with buffer_map, please lower them first"; std::vector<llvm::Type*> param_types; @@ -126,9 +126,9 @@ void CodeGenLLVM::AddFunctionInternal(const PrimFunc& f, bool ret_void) { llvm::FunctionType::get(ret_void ? 
t_void_ : t_int_, param_types, false); auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenLLVM: Expect PrimFunc to have the global_symbol attribute"; - CHECK(module_->getFunction(static_cast(global_symbol.value())) == nullptr) + ICHECK(module_->getFunction(static_cast(global_symbol.value())) == nullptr) << "Function " << global_symbol << " already exist in module"; function_ = llvm::Function::Create(ftype, llvm::Function::ExternalLinkage, @@ -182,7 +182,7 @@ void CodeGenLLVM::AddFunctionInternal(const PrimFunc& f, bool ret_void) { std::unique_ptr CodeGenLLVM::Finish() { this->AddStartupFunction(); for (size_t i = 0; i < link_modules_.size(); ++i) { - CHECK(!llvm::Linker::linkModules(*module_, std::move(link_modules_[i]))) + ICHECK(!llvm::Linker::linkModules(*module_, std::move(link_modules_[i]))) << "Failed to link modules"; } link_modules_.clear(); @@ -302,7 +302,7 @@ unsigned CodeGenLLVM::GetGlobalAddressSpace() const { return 0; } llvm::Type* CodeGenLLVM::DTypeToLLVMType(const DataType& dtype) const { if (dtype.is_handle()) { - CHECK_EQ(dtype.lanes(), 1); + ICHECK_EQ(dtype.lanes(), 1); return t_void_p_; } if (dtype.is_void()) { @@ -489,7 +489,7 @@ llvm::Value* CodeGenLLVM::CreateBroadcast(llvm::Value* value, int lanes) { llvm::Value* CodeGenLLVM::CreateVecSlice(llvm::Value* vec, int begin, int extent) { int num_elems = GetVectorNumElements(vec); if (extent == num_elems && begin == 0) return vec; - CHECK(begin >= 0 && extent <= num_elems) << "Slicing out of bound!\n"; + ICHECK(begin >= 0 && extent <= num_elems) << "Slicing out of bound!\n"; std::vector indices; indices.reserve(extent); for (int i = 0; i < extent; ++i) { @@ -519,7 +519,7 @@ llvm::Value* CodeGenLLVM::CreateVecPad(llvm::Value* vec, int target_lanes) { llvm::Value* mask = llvm::UndefValue::get(DTypeToLLVMType(DataType::Int(32, target_lanes))); int num_elems = GetVectorNumElements(vec); if (num_elems == target_lanes) return vec; - CHECK_LT(num_elems, target_lanes); + ICHECK_LT(num_elems, target_lanes); for (int i = 0; i < num_elems; ++i) { mask = builder_->CreateInsertElement(mask, ConstInt32(i), ConstInt32(i)); } @@ -578,7 +578,7 @@ void CodeGenLLVM::CreateSerialFor(llvm::Value* begin, llvm::Value* end, llvm::Va builder_->SetInsertPoint(for_begin); llvm::PHINode* loop_value = builder_->CreatePHI(begin->getType(), 2); loop_value->addIncoming(begin, pre_block); - CHECK(!var_map_.count(loop_var.get())); + ICHECK(!var_map_.count(loop_var.get())); var_map_[loop_var.get()] = loop_value; builder_->CreateCondBr(CreateLT(loop_var.dtype(), loop_value, end), for_body, for_end, md_very_likely_branch_); @@ -621,7 +621,7 @@ llvm::Value* CodeGenLLVM::CreateCast(DataType from, DataType to, llvm::Value* va } else if (from.is_uint() && to.is_float()) { return builder_->CreateUIToFP(value, target); } else { - CHECK(from.is_float() && to.is_float()); + ICHECK(from.is_float() && to.is_float()); return builder_->CreateFPCast(value, target); } } @@ -647,7 +647,7 @@ llvm::Constant* CodeGenLLVM::GetConstString(const std::string& str) { llvm::Value* CodeGenLLVM::CreateBufferPtr(DataType t, llvm::Value* buffer, llvm::Value* index) { llvm::PointerType* btype = llvm::dyn_cast(buffer->getType()); - CHECK(btype != nullptr); + ICHECK(btype != nullptr); llvm::PointerType* ptype = DTypeToLLVMType(t)->getPointerTo(btype->getAddressSpace()); if (btype != ptype) { buffer = builder_->CreatePointerCast(buffer, ptype); @@ -657,7 +657,7 @@ llvm::Value* 
CodeGenLLVM::CreateBufferPtr(DataType t, llvm::Value* buffer, llvm: llvm::Value* CodeGenLLVM::GetVarValue(const VarNode* v) const { auto it = var_map_.find(v); - CHECK(it != var_map_.end()) << "cannot find variable " << v->name_hint; + ICHECK(it != var_map_.end()) << "cannot find variable " << v->name_hint; return it->second; } @@ -747,7 +747,7 @@ llvm::Function* CodeGenLLVM::GetIntrinsicDecl(llvm::Intrinsic::ID id, llvm::Type llvm::Value* CodeGenLLVM::CreateIntrinsic(const CallNode* op) { if (op->op.same_as(builtin_call_llvm_intrin_) || op->op.same_as(builtin_call_llvm_pure_intrin_)) { - CHECK_GE(op->args.size(), 2U); + ICHECK_GE(op->args.size(), 2U); llvm::Intrinsic::ID id = static_cast(Downcast(op->args[0])->value); int64_t num_signature = Downcast(op->args[1])->value; std::vector arg_value; @@ -768,8 +768,8 @@ llvm::Value* CodeGenLLVM::CreateIntrinsic(const CallNode* op) { llvm::Type* return_type = (id != llvm::Intrinsic::prefetch) ? GetLLVMType(GetRef(op)) : llvm::Type::getVoidTy(*ctx_); llvm::Function* f = GetIntrinsicDecl(id, return_type, arg_type); - CHECK(f) << "Cannot find intrinsic declaration, possible type mismatch: " - << llvm::Intrinsic::getName(id, {}); + ICHECK(f) << "Cannot find intrinsic declaration, possible type mismatch: " + << llvm::Intrinsic::getName(id, {}); return builder_->CreateCall(f, arg_value); } else if (op->op.same_as(builtin::bitwise_and())) { return builder_->CreateAnd(MakeValue(op->args[0]), MakeValue(op->args[1])); @@ -791,7 +791,7 @@ llvm::Value* CodeGenLLVM::CreateIntrinsic(const CallNode* op) { return CreateStorageSync(op); } else if (op->op.same_as(builtin::address_of())) { const LoadNode* l = op->args[0].as(); - CHECK(op->args.size() == 1 && l); + ICHECK(op->args.size() == 1 && l); const RampNode* r = l->index.as(); llvm::Value* ptr; unsigned addrspace; @@ -809,13 +809,13 @@ llvm::Value* CodeGenLLVM::CreateIntrinsic(const CallNode* op) { } else if (op->op.same_as(builtin::isnullptr())) { return builder_->CreateIsNull(MakeValue(op->args[0])); } else if (op->op.same_as(builtin::large_uint_imm())) { - CHECK_EQ(op->args.size(), 2U); + ICHECK_EQ(op->args.size(), 2U); uint64_t low = static_cast(Downcast(op->args[0])->value); uint64_t high = static_cast(Downcast(op->args[1])->value); uint64_t val = (high << 32U) | low; return llvm::ConstantInt::get(DTypeToLLVMType(op->dtype), val); } else if (op->op.same_as(builtin::if_then_else())) { - CHECK_EQ(op->args[0].dtype().lanes(), 1) << "if_then_else can only take scalar condition"; + ICHECK_EQ(op->args[0].dtype().lanes(), 1) << "if_then_else can only take scalar condition"; using llvm::BasicBlock; BasicBlock* then_block = BasicBlock::Create(*ctx_, "if_then", function_); BasicBlock* else_block = BasicBlock::Create(*ctx_, "if_else", function_); @@ -913,7 +913,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const StringImmNode* op) { return GetConstS return builder_->Create##Op(a, b); \ } \ } else { \ - CHECK(t.is_float()); \ + ICHECK(t.is_float()); \ return builder_->CreateF##Op(a, b); \ } \ } \ @@ -932,7 +932,7 @@ DEFINE_CODEGEN_BINARY_OP(Mul); } else if (t.is_uint()) { \ return builder_->CreateICmpU##Op(a, b); \ } else { \ - CHECK(t.is_float()); \ + ICHECK(t.is_float()); \ return builder_->CreateFCmpO##Op(a, b); \ } \ } \ @@ -953,7 +953,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const DivNode* op) { } else if (op->dtype.is_uint()) { return builder_->CreateUDiv(a, b); } else { - CHECK(op->dtype.is_float()); + ICHECK(op->dtype.is_float()); return builder_->CreateFDiv(a, b); } } @@ -966,7 +966,7 @@ llvm::Value* 
CodeGenLLVM::VisitExpr_(const ModNode* op) { } else if (op->dtype.is_uint()) { return builder_->CreateURem(a, b); } else { - CHECK(op->dtype.is_float()); + ICHECK(op->dtype.is_float()); return builder_->CreateFRem(a, b); } } @@ -1023,7 +1023,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const SelectNode* op) { llvm::Value* CodeGenLLVM::VisitExpr_(const LetNode* op) { auto it = let_binding_.find(op->var); if (it != let_binding_.end()) { - CHECK(deep_equal_(it->second->value, op->value)) + ICHECK(deep_equal_(it->second->value, op->value)) << "Let cannot bind the same var to two different values"; } else { let_binding_[op->var] = op; } @@ -1057,7 +1057,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const LoadNode* op) { if (is_one(ramp->stride)) { int alignment, native_bits; GetAlignment(t, op->buffer_var.get(), ramp->base, &alignment, &native_bits); - CHECK_EQ(ramp->lanes, t.lanes()); + ICHECK_EQ(ramp->lanes, t.lanes()); llvm::Value* ptr = CreateBufferPtr(t.element_of(), buffer, MakeValue(ramp->base)); ptr = builder_->CreatePointerCast(ptr, DTypeToLLVMType(t)->getPointerTo(addrspace)); #if TVM_LLVM_VERSION >= 110 @@ -1093,7 +1093,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const CallNode* op) { auto call_op = GetRef<Op>(ptr_op); if (op->op.same_as(builtin_call_extern_) || op->op.same_as(builtin_call_pure_extern_)) { // call extern intrinsic - CHECK_GE(op->args.size(), 1U); + ICHECK_GE(op->args.size(), 1U); auto global_symbol = Downcast<StringImm>(op->args[0]); return this->CreateCallExtern(GetType(GetRef<PrimExpr>(op)), global_symbol->value, op->args, true); @@ -1105,7 +1105,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const CallNode* op) { return CreateIntrinsic(op); } } else { - CHECK(op->op.as<GlobalVarNode>()); + ICHECK(op->op.as<GlobalVarNode>()); LOG(FATAL) << "Do not yet support cross function call"; return nullptr; } @@ -1131,8 +1131,8 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const ShuffleNode* op) { std::vector<uint32_t> idx(op->indices.size()); for (int i = 0, e = op->indices.size(); i < e; ++i) { const int64_t* val = as_const_int(op->indices[i]); - CHECK(val && *val >= 0 && *val < total_lanes) << "Shuffled indeces are suppose to be int, " - << "but get " << op->indices[i] << "\n"; + ICHECK(val && *val >= 0 && *val < total_lanes) << "Shuffled indices are supposed to be int, " + << "but get " << op->indices[i] << "\n"; idx[i] = *val; } llvm::Value* mask = llvm::ConstantDataVector::get(builder_->getContext(), idx); @@ -1149,7 +1149,7 @@ llvm::Value* CodeGenLLVM::VisitExpr_(const BroadcastNode* op) { } void CodeGenLLVM::VisitStmt_(const StoreNode* op) { - CHECK(is_one(op->predicate)); + ICHECK(is_one(op->predicate)); DataType t = op->value.dtype(); bool is_volatile = volatile_buf_.count(op->buffer_var.get()); llvm::Value* buffer = MakeValue(op->buffer_var); @@ -1175,7 +1175,7 @@ void CodeGenLLVM::VisitStmt_(const StoreNode* op) { if (is_one(ramp->stride)) { int alignment, native_bits; GetAlignment(t, op->buffer_var.get(), ramp->base, &alignment, &native_bits); - CHECK_EQ(ramp->lanes, t.lanes()); + ICHECK_EQ(ramp->lanes, t.lanes()); llvm::Value* ptr = CreateBufferPtr(t.element_of(), buffer, MakeValue(ramp->base)); ptr = builder_->CreatePointerCast(ptr, DTypeToLLVMType(t)->getPointerTo(addrspace)); #if TVM_LLVM_VERSION >= 110 @@ -1189,7 +1189,7 @@ void CodeGenLLVM::VisitStmt_(const StoreNode* op) { } } } - CHECK_GE(t.bits(), 8); + ICHECK_GE(t.bits(), 8); // scalarized store.
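// ---------------------------------------------------------------------------
// Every hunk in this patch is the same mechanical swap: a CHECK-family macro
// becomes its ICHECK counterpart (plus indentation reflow). As a mental model
// of the distinction, here is a minimal, self-contained sketch of what an
// internal-invariant macro can look like. This is an assumption made for
// illustration; TVM's real ICHECK lives in its logging header and differs in
// detail.
#include <cstdlib>
#include <iostream>
#include <sstream>

// Collects message pieces and aborts when the temporary dies at the end of
// the full expression, so `ICHECK_SKETCH(c) << "detail"` composes naturally.
class FatalStream {
 public:
  template <typename T>
  FatalStream& operator<<(const T& value) {
    msg_ << value;
    return *this;
  }
  ~FatalStream() {
    std::cerr << msg_.str() << std::endl;
    std::abort();
  }

 private:
  std::ostringstream msg_;
};

// The leading "I" marks an internal invariant: a failure means a bug in the
// compiler itself rather than bad user input, hence the InternalError prefix.
#define ICHECK_SKETCH(cond) \
  if (!(cond)) FatalStream() << "InternalError: Check failed: (" #cond ") "

int main() {
  int lanes = 1;
  ICHECK_SKETCH(lanes == 1) << "handle types must be scalar";  // passes
  return 0;
}
// ---------------------------------------------------------------------------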
int basic_align = t.bits() / 8; auto f = [&](int i, llvm::Value* index) { @@ -1207,13 +1207,13 @@ void CodeGenLLVM::VisitStmt_(const StoreNode* op) { } void CodeGenLLVM::VisitStmt_(const ForNode* op) { - CHECK(is_zero(op->min)); + ICHECK(is_zero(op->min)); analyzer_->Bind(op->loop_var, Range::FromMinExtent(op->min, op->extent)); if (op->for_type == ForType::Unrolled) { LOG(WARNING) << "Unroll hint get ignore at CodeGenLLVM backend, " << " consider set unroll_explicit=True"; } else { - CHECK(op->for_type == ForType::Serial); + ICHECK(op->for_type == ForType::Serial); } CreateSerialFor(MakeValue(op->min), MakeValue(op->extent), llvm::ConstantInt::getSigned(GetLLVMType(op->extent), 1), op->loop_var, op->body); @@ -1243,11 +1243,11 @@ void CodeGenLLVM::VisitStmt_(const IfThenElseNode* op) { } void CodeGenLLVM::VisitStmt_(const AllocateNode* op) { - CHECK(!is_zero(op->condition)); + ICHECK(!is_zero(op->condition)); llvm::Value* buf = nullptr; int32_t constant_size = op->constant_allocation_size(); - CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation"; + ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation"; StorageInfo& info = alloc_storage_info_[op->buffer_var.get()]; if (constant_size % 4 == 0 && info.alignment == 0) { info.alignment = GetTempAllocaAlignment(op->dtype, constant_size); @@ -1271,7 +1271,7 @@ void CodeGenLLVM::VisitStmt_(const AllocateNode* op) { buf = builder_->CreatePointerCast( buf, DTypeToLLVMType(op->dtype)->getPointerTo(buf->getType()->getPointerAddressSpace())); - CHECK(!var_map_.count(op->buffer_var.get())); + ICHECK(!var_map_.count(op->buffer_var.get())); var_map_[op->buffer_var.get()] = buf; this->VisitStmt(op->body); } @@ -1287,12 +1287,12 @@ void CodeGenLLVM::VisitStmt_(const AttrStmtNode* op) { } } else if (op->attr_key == tir::attr::storage_scope) { const VarNode* v = op->node.as(); - CHECK(v); + ICHECK(v); alloc_storage_info_[v].scope = runtime::StorageScope::Create(op->value.as()->value); } else if (op->attr_key == tir::attr::storage_alignment) { const VarNode* v = op->node.as(); - CHECK(v); + ICHECK(v); alloc_storage_info_[v].alignment = static_cast(op->value.as()->value); if (var_map_.count(v) && alloc_storage_info_[v].alignment > 1) { builder_->CreateAlignmentAssumption(*data_layout_, GetVarValue(v), @@ -1300,7 +1300,7 @@ void CodeGenLLVM::VisitStmt_(const AttrStmtNode* op) { } } else if (op->attr_key == tir::attr::volatile_scope) { const VarNode* v = op->node.as(); - CHECK(v); + ICHECK(v); volatile_buf_.insert(v); } this->VisitStmt(op->body); @@ -1313,7 +1313,7 @@ void CodeGenLLVM::VisitStmt_(const AssertStmtNode* op) { void CodeGenLLVM::VisitStmt_(const LetStmtNode* op) { const VarNode* v = op->var.get(); - CHECK(!var_map_.count(v)); + ICHECK(!var_map_.count(v)); if (v->dtype.is_handle()) { if (!is_restricted_) { alias_var_set_.insert(v); diff --git a/src/target/llvm/codegen_nvptx.cc b/src/target/llvm/codegen_nvptx.cc index 601df86d10ba..22e612b11090 100644 --- a/src/target/llvm/codegen_nvptx.cc +++ b/src/target/llvm/codegen_nvptx.cc @@ -46,11 +46,11 @@ class CodeGenNVPTX : public CodeGenLLVM { } void VisitStmt_(const AllocateNode* op) final { - CHECK(!is_zero(op->condition)); + ICHECK(!is_zero(op->condition)); llvm::Value* buf = nullptr; int32_t constant_size = op->constant_allocation_size(); - CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU"; + ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU"; StorageInfo& info = 
alloc_storage_info_[op->buffer_var.get()]; if (constant_size % 4 == 0 && info.alignment == 0) { info.alignment = GetTempAllocaAlignment(op->dtype, constant_size); @@ -75,7 +75,7 @@ class CodeGenNVPTX : public CodeGenLLVM { } buf = alloca; } else { - CHECK(info.scope.rank == runtime::StorageRank::kShared) + ICHECK(info.scope.rank == runtime::StorageRank::kShared) << "Can only allocate shared or local memory inside kernel"; // Shared memory: address space == 3 const unsigned shared_address_space = 3; @@ -94,7 +94,7 @@ class CodeGenNVPTX : public CodeGenLLVM { buf = builder_->CreatePointerCast( buf, DTypeToLLVMType(op->dtype)->getPointerTo(buf->getType()->getPointerAddressSpace())); - CHECK(!var_map_.count(op->buffer_var.get())); + ICHECK(!var_map_.count(op->buffer_var.get())); var_map_[op->buffer_var.get()] = buf; this->VisitStmt(op->body); } @@ -118,7 +118,7 @@ class CodeGenNVPTX : public CodeGenLLVM { LOG(FATAL) << "unknown thread idx"; } } else { - CHECK_EQ(ts.rank, 0); + ICHECK_EQ(ts.rank, 0); switch (ts.dim_index) { case 0: intrin_id = ::llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x; @@ -238,7 +238,7 @@ llvm::Value* CodeGenNVPTX::CreateIntrinsic(const CallNode* op) { int GetCUDAComputeVersion(const Target& target) { Optional mcpu = target->GetAttr("mcpu"); - CHECK(mcpu.defined()) << "InternalError: \"-mcpu\" is undefined in the NVPTX target"; + ICHECK(mcpu.defined()) << "InternalError: \"-mcpu\" is undefined in the NVPTX target"; std::string sm_version = mcpu.value(); return std::stoi(sm_version.substr(3)); } @@ -255,7 +255,7 @@ runtime::Module BuildNVPTX(IRModule mod, Target target) { cg->Init("TVMPTXModule", tm.get(), ctx.get(), false, false, false); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; + ICHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; auto f = Downcast(kv.second); cg->AddFunction(f); } @@ -287,14 +287,14 @@ runtime::Module BuildNVPTX(IRModule mod, Target target) { // emit ptx llvm::legacy::PassManager pass; #if TVM_LLVM_VERSION <= 60 - CHECK(tm->addPassesToEmitFile(pass, dest_ptx, llvm::TargetMachine::CGFT_AssemblyFile) == 0) + ICHECK(tm->addPassesToEmitFile(pass, dest_ptx, llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm->addPassesToEmitFile(pass, dest_ptx, nullptr, llvm::TargetMachine::CGFT_AssemblyFile) == - 0) + ICHECK(tm->addPassesToEmitFile(pass, dest_ptx, nullptr, llvm::TargetMachine::CGFT_AssemblyFile) == + 0) << "Cannot emit target CGFT_ObjectFile"; #else - CHECK(tm->addPassesToEmitFile(pass, dest_ptx, nullptr, llvm::CGFT_AssemblyFile) == 0) + ICHECK(tm->addPassesToEmitFile(pass, dest_ptx, nullptr, llvm::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #endif pass.run(*module); diff --git a/src/target/llvm/codegen_x86_64.cc b/src/target/llvm/codegen_x86_64.cc index a71a0226c958..c2785458a004 100644 --- a/src/target/llvm/codegen_x86_64.cc +++ b/src/target/llvm/codegen_x86_64.cc @@ -79,7 +79,7 @@ llvm::Value* CodeGenX86_64::VisitExpr_(const CastNode* op) { const auto from = op->value.dtype(); const auto to = op->dtype; if (from.is_float() && to.is_float() && from.bits() == 16 && to.bits() == 32) { - CHECK_EQ(from.lanes(), to.lanes()); + ICHECK_EQ(from.lanes(), to.lanes()); CHECK_NOTNULL(target_machine_); const auto has_avx512 = TargetHasFeature(*target_machine_, "avx512f"); @@ -128,13 +128,13 @@ llvm::Value* CodeGenX86_64::CallVectorIntrin(llvm::Intrinsic::ID id, size_t intr // 
Otherwise, we split the vector into intrin_lanes sized elements (widening where necessary), // compute each result, and then concatenate the vectors (slicing the result if necessary). - CHECK_LT(intrin_lanes, num_elems); + ICHECK_LT(intrin_lanes, num_elems); std::vector split_results; for (size_t i = 0; i < num_elems; i += intrin_lanes) { std::vector split_args; for (const auto& v : args) { if (v->getType()->isVectorTy()) { - CHECK_EQ(GetVectorNumElements(v), num_elems); + ICHECK_EQ(GetVectorNumElements(v), num_elems); split_args.push_back(CreateVecSlice(v, i, intrin_lanes)); } else { split_args.push_back(v); diff --git a/src/target/llvm/intrin_rule_llvm.cc b/src/target/llvm/intrin_rule_llvm.cc index abf350e2208a..4c8862bbfb63 100644 --- a/src/target/llvm/intrin_rule_llvm.cc +++ b/src/target/llvm/intrin_rule_llvm.cc @@ -47,7 +47,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.exp10") using tir::make_zero; PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const PrimExpr& x = call->args[0]; PrimExpr ln10 = make_const(x.dtype(), 2.302585093); PrimExpr ret = exp(x * ln10); @@ -93,7 +93,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.tanh") using tir::make_zero; PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const PrimExpr& x = call->args[0]; PrimExpr one = make_const(x.dtype(), 1); PrimExpr two = make_const(x.dtype(), 2); @@ -116,7 +116,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.popcount") TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.tan").set_body([](const TVMArgs& targs, TVMRetValue* rv) { PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const PrimExpr& x = call->args[0]; PrimExpr tan_x = sin(x) / cos(x); *rv = tan_x; @@ -131,7 +131,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.cosh") using tir::make_zero; PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const PrimExpr& x = call->args[0]; PrimExpr two = make_const(x.dtype(), 2); PrimExpr neg_one = make_const(x.dtype(), -1); @@ -150,7 +150,7 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.llvm.sinh") using tir::make_zero; PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const PrimExpr& x = call->args[0]; PrimExpr two = make_const(x.dtype(), 2); PrimExpr neg_one = make_const(x.dtype(), -1); diff --git a/src/target/llvm/intrin_rule_llvm.h b/src/target/llvm/intrin_rule_llvm.h index 1a6775e92e12..99463793d8de 100644 --- a/src/target/llvm/intrin_rule_llvm.h +++ b/src/target/llvm/intrin_rule_llvm.h @@ -41,7 +41,7 @@ template inline void DispatchLLVMPureIntrin(const TVMArgs& targs, TVMRetValue* rv) { PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); Array cargs; // intrin id. cargs.push_back(IntImm(DataType::UInt(32), id)); @@ -57,7 +57,7 @@ template inline void DispatchLLVMIntrin(const TVMArgs& targs, TVMRetValue* rv) { PrimExpr e = targs[0]; const tir::CallNode* call = e.as(); - CHECK(call != nullptr); + ICHECK(call != nullptr); Array cargs; // intrin id. 
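// ---------------------------------------------------------------------------
// Orientation for the cargs layout built below: under the call_llvm_intrin
// convention, args[0] carries the numeric LLVM intrinsic id and args[1] the
// num_signature count (how many leading value arguments participate in
// resolving the intrinsic's overloaded type). A rough sketch against the TVM
// C++ API, assuming a checkout of roughly this revision:
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>

using namespace tvm;

// Wraps a scalar expression x in tir.call_llvm_pure_intrin; the two leading
// IntImms are the metadata consumed by CodeGenLLVM::CreateIntrinsic above.
tir::Call MakeLLVMIntrinCall(uint32_t intrin_id, PrimExpr x) {
  Array<PrimExpr> cargs;
  cargs.push_back(IntImm(DataType::UInt(32), intrin_id));  // intrin id
  cargs.push_back(IntImm(DataType::UInt(32), 1));          // num_signature
  cargs.push_back(x);                                      // the actual operand
  return tir::Call(x.dtype(), tir::builtin::call_llvm_pure_intrin(), cargs);
}
// ---------------------------------------------------------------------------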
cargs.push_back(IntImm(DataType::UInt(32), id)); diff --git a/src/target/llvm/intrin_rule_nvptx.cc b/src/target/llvm/intrin_rule_nvptx.cc index 0e332940339c..bb653e8ee5e0 100644 --- a/src/target/llvm/intrin_rule_nvptx.cc +++ b/src/target/llvm/intrin_rule_nvptx.cc @@ -36,13 +36,14 @@ inline void DispatchPureExternLibDevice(const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; using namespace tir; const CallNode* call = e.as<CallNode>(); - CHECK(call != nullptr); - CHECK(call->dtype.bits() == 32 || call->dtype.bits() == 64) << "Only support float32 or float64."; + ICHECK(call != nullptr); + ICHECK(call->dtype.bits() == 32 || call->dtype.bits() == 64) + << "Only support float32 or float64."; const OpNode* op = call->op.as<OpNode>(); - CHECK(op != nullptr); + ICHECK(op != nullptr); std::string name = op->name; - CHECK_EQ(name.substr(0, 4), "tir."); + ICHECK_EQ(name.substr(0, 4), "tir."); std::ostringstream intrinsic_name; intrinsic_name << "__nv_" << name.substr(4); diff --git a/src/target/llvm/intrin_rule_rocm.cc b/src/target/llvm/intrin_rule_rocm.cc index 22ebf9b192aa..08b32ed1b946 100644 --- a/src/target/llvm/intrin_rule_rocm.cc +++ b/src/target/llvm/intrin_rule_rocm.cc @@ -36,12 +36,12 @@ inline void DispatchPureExternOCML(const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; using namespace tir; const CallNode* call = e.as<CallNode>(); - CHECK(call != nullptr); + ICHECK(call != nullptr); const OpNode* op = call->op.as<OpNode>(); - CHECK(op != nullptr); + ICHECK(op != nullptr); std::string name = op->name; - CHECK_EQ(name.substr(0, 4), "tir."); + ICHECK_EQ(name.substr(0, 4), "tir."); std::ostringstream intrinsic_name; intrinsic_name << "__ocml_" << name.substr(4) << "_f" << call->dtype.bits(); @@ -58,10 +58,10 @@ inline void DispatchShuffle(const TVMArgs& targs, TVMRetValue* rv) { PrimExpr e_call = targs[0]; using namespace tir; const CallNode* call = e_call.as<CallNode>(); - CHECK(call != nullptr); - CHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size + ICHECK(call != nullptr); + ICHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size PrimExpr var = call->args[1]; - CHECK_EQ(var.dtype().bits(), 32); + ICHECK_EQ(var.dtype().bits(), 32); // get own lane in self (__lane_id) PrimExpr minus_one = tir::make_const(DataType::Int(32), -1); @@ -82,7 +82,7 @@ inline void DispatchShuffle(const TVMArgs& targs, TVMRetValue* rv) { index = self - delta; index = Select(index < (self & ~(width - 1)), self, index); } else { - CHECK(call->op.same_as(builtin::tvm_warp_shuffle_down())); + ICHECK(call->op.same_as(builtin::tvm_warp_shuffle_down())); PrimExpr delta = call->args[2]; index = self + delta; index = Select((self & (width - 1)) + delta >= width, self, index); diff --git a/src/target/llvm/llvm_common.cc b/src/target/llvm/llvm_common.cc index e8225ab5b6e4..35bfc8dc2e5b 100644 --- a/src/target/llvm/llvm_common.cc +++ b/src/target/llvm/llvm_common.cc @@ -24,7 +24,7 @@ #include "llvm_common.h" -#include +#include #include #include @@ -133,7 +133,7 @@ std::unique_ptr<llvm::TargetMachine> GetLLVMTargetMachine(const Target& target, std::string err; const llvm::Target* llvm_target = llvm::TargetRegistry::lookupTarget(target_triple, err); if (llvm_target == nullptr) { - CHECK(allow_null) << err << " target_triple=" << target_triple; + ICHECK(allow_null) << err << " target_triple=" << target_triple; return nullptr; } llvm::TargetMachine* tm = diff --git a/src/target/llvm/llvm_module.cc b/src/target/llvm/llvm_module.cc index a37710d5622b..569082022852 100644 --- a/src/target/llvm/llvm_module.cc +++ 
b/src/target/llvm/llvm_module.cc @@ -76,7 +76,7 @@ class LLVMModuleNode final : public runtime::ModuleNode { if (name == runtime::symbol::tvm_module_main) { const char* entry_name = reinterpret_cast(GetGlobalAddr(runtime::symbol::tvm_module_main)); - CHECK(entry_name != nullptr) + ICHECK(entry_name != nullptr) << "Symbol " << runtime::symbol::tvm_module_main << " is not presented"; faddr = reinterpret_cast(GetFunctionAddr(entry_name)); } else { @@ -90,7 +90,7 @@ class LLVMModuleNode final : public runtime::ModuleNode { std::string fmt = runtime::GetFileFormat(file_name, format); std::error_code ecode; llvm::raw_fd_ostream dest(file_name, ecode, llvm::sys::fs::F_None); - CHECK_EQ(ecode.value(), 0) << "Cannot open file: " << file_name << " " << ecode.message(); + ICHECK_EQ(ecode.value(), 0) << "Cannot open file: " << file_name << " " << ecode.message(); if (fmt == "o" || fmt == "obj") { #if TVM_LLVM_VERSION <= 60 std::unique_ptr m = llvm::CloneModule(mptr_); @@ -98,16 +98,16 @@ class LLVMModuleNode final : public runtime::ModuleNode { std::unique_ptr m = llvm::CloneModule(*mptr_); #endif llvm::legacy::PassManager pass; - CHECK(tm_); + ICHECK(tm_); #if TVM_LLVM_VERSION <= 60 - CHECK(tm_->addPassesToEmitFile(pass, dest, llvm::TargetMachine::CGFT_ObjectFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, llvm::TargetMachine::CGFT_ObjectFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::TargetMachine::CGFT_ObjectFile) == - 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::TargetMachine::CGFT_ObjectFile) == + 0) << "Cannot emit target CGFT_ObjectFile"; #else - CHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::CGFT_ObjectFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::CGFT_ObjectFile) == 0) << "Cannot emit target CGFT_ObjectFile"; #endif pass.run(*m); @@ -118,16 +118,16 @@ class LLVMModuleNode final : public runtime::ModuleNode { std::unique_ptr m = llvm::CloneModule(*mptr_); #endif llvm::legacy::PassManager pass; - CHECK(tm_); + ICHECK(tm_); #if TVM_LLVM_VERSION <= 60 - CHECK(tm_->addPassesToEmitFile(pass, dest, llvm::TargetMachine::CGFT_AssemblyFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::TargetMachine::CGFT_AssemblyFile) == - 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, + llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #else - CHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::CGFT_AssemblyFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, dest, nullptr, llvm::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #endif pass.run(*m); @@ -163,16 +163,16 @@ class LLVMModuleNode final : public runtime::ModuleNode { std::unique_ptr m = llvm::CloneModule(*mptr_); #endif llvm::legacy::PassManager pass; - CHECK(tm_); + ICHECK(tm_); #if TVM_LLVM_VERSION <= 60 - CHECK(tm_->addPassesToEmitFile(pass, rso, llvm::TargetMachine::CGFT_AssemblyFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, rso, llvm::TargetMachine::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #elif TVM_LLVM_VERSION <= 90 - CHECK(tm_->addPassesToEmitFile(pass, rso, nullptr, llvm::TargetMachine::CGFT_AssemblyFile) == - 0) + ICHECK(tm_->addPassesToEmitFile(pass, rso, nullptr, llvm::TargetMachine::CGFT_AssemblyFile) 
== + 0) << "Cannot emit target CGFT_AssemblyFile"; #else - CHECK(tm_->addPassesToEmitFile(pass, rso, nullptr, llvm::CGFT_AssemblyFile) == 0) + ICHECK(tm_->addPassesToEmitFile(pass, rso, nullptr, llvm::CGFT_AssemblyFile) == 0) << "Cannot emit target CGFT_AssemblyFile"; #endif pass.run(*m); @@ -180,7 +180,7 @@ class LLVMModuleNode final : public runtime::ModuleNode { } else if (fmt == "" || fmt == "ll") { std::string type_str; llvm::raw_string_ostream rso(type_str); - CHECK(mptr_ != nullptr); + ICHECK(mptr_ != nullptr); mptr_->print(rso, nullptr); return rso.str(); } else { @@ -200,16 +200,16 @@ class LLVMModuleNode final : public runtime::ModuleNode { std::vector funcs; std::string entry_func; for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; + ICHECK(kv.second->IsInstance()) << "Can only lower IR Module with PrimFuncs"; auto f = Downcast(kv.second); if (f->HasNonzeroAttr(tir::attr::kIsEntryFunc)) { auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()); + ICHECK(global_symbol.defined()); entry_func = global_symbol.value(); } funcs.push_back(f); } - CHECK_NE(funcs.size(), 0U); + ICHECK_NE(funcs.size(), 0U); // TODO(tqchen): remove the entry function behavior as it does not // makes sense when we start to use multiple modules. cg->Init("TVMMod", tm_.get(), ctx_.get(), system_lib, system_lib, target_c_runtime); @@ -254,7 +254,7 @@ class LLVMModuleNode final : public runtime::ModuleNode { llvm::Metadata* tvm_target = module_->getModuleFlag("tvm_target"); if (tvm_target != nullptr) { llvm::MDString* pstr = llvm::dyn_cast(tvm_target); - CHECK(pstr != nullptr); + ICHECK(pstr != nullptr); target_metadata = pstr->getString().str(); if (!(target_metadata.length() >= 4 && target_metadata.substr(0, 4) == "llvm")) { target_metadata = "llvm " + target_metadata; @@ -311,12 +311,12 @@ class LLVMModuleNode final : public runtime::ModuleNode { << " system=" << tm_sys->getTargetTriple().str(); } llvm::DataLayout layout(tm->createDataLayout()); - CHECK(layout == mptr_->getDataLayout()) + ICHECK(layout == mptr_->getDataLayout()) << "Data layout mismatch between module(" << mptr_->getDataLayout().getStringRepresentation() << ")" << " and ExecutionEngine (" << layout.getStringRepresentation() << ")"; ee_ = builder.create(tm.release()); - CHECK(ee_ != nullptr) << "Failed to initialize jit engine for " << mptr_->getTargetTriple(); + ICHECK(ee_ != nullptr) << "Failed to initialize jit engine for " << mptr_->getTargetTriple(); ee_->runStaticConstructorsDestructors(false); if (void** ctx_addr = diff --git a/src/target/opt/build_cuda_on.cc b/src/target/opt/build_cuda_on.cc index 780829c256ce..1a0f08920fb6 100644 --- a/src/target/opt/build_cuda_on.cc +++ b/src/target/opt/build_cuda_on.cc @@ -109,7 +109,7 @@ std::string NVRTCCompile(const std::string& code, bool include_path = false) { std::string log; log.resize(log_size); NVRTC_CALL(nvrtcGetProgramLog(prog, &log[0])); - CHECK_EQ(compile_res, NVRTC_SUCCESS) << log; + ICHECK_EQ(compile_res, NVRTC_SUCCESS) << log; size_t ptx_size; NVRTC_CALL(nvrtcGetPTXSize(prog, &ptx_size)); @@ -128,10 +128,10 @@ runtime::Module BuildCUDA(IRModule mod, Target target) { cg.Init(output_ssa); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodeGenCUDA: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodeGenCUDA: Can only take PrimFunc"; auto f = Downcast(kv.second); auto calling_conv = f->GetAttr(tvm::attr::kCallingConv); - CHECK(calling_conv == 
CallingConv::kDeviceKernelLaunch) + ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch) << "CodeGenCUDA: expect calling_conv equals CallingConv::kDeviceKernelLaunch"; cg.AddFunction(f); } diff --git a/src/target/source/codegen_aocl.cc b/src/target/source/codegen_aocl.cc index 00533d27c5a6..b3ed7cf32f7f 100644 --- a/src/target/source/codegen_aocl.cc +++ b/src/target/source/codegen_aocl.cc @@ -41,10 +41,10 @@ runtime::Module BuildAOCL(IRModule mod, Target target, bool emulation) { cg.Init(output_ssa); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodegenOpenCL: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodegenOpenCL: Can only take PrimFunc"; auto f = Downcast(kv.second); auto calling_conv = f->GetAttr(tvm::attr::kCallingConv); - CHECK(calling_conv == CallingConv::kDeviceKernelLaunch) + ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch) << "CodegenOpenCL: expect calling_conv equals CallingConv::kDeviceKernelLaunch"; cg.AddFunction(f); } diff --git a/src/target/source/codegen_c.cc b/src/target/source/codegen_c.cc index 2f19d6e126ad..ca9b80564cd9 100644 --- a/src/target/source/codegen_c.cc +++ b/src/target/source/codegen_c.cc @@ -78,7 +78,8 @@ void CodeGenC::AddFunction(const PrimFunc& f) { ReserveKeywordsAsUnique(); auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) << "CodeGenC: Expect PrimFunc to have the global_symbol attribute"; + ICHECK(global_symbol.defined()) + << "CodeGenC: Expect PrimFunc to have the global_symbol attribute"; bool no_alias = f->HasNonzeroAttr(tir::attr::kNoAlias); this->PrintFuncPrefix(); @@ -187,7 +188,7 @@ std::string CodeGenC::GetBufferRef(DataType t, const VarNode* buffer, PrimExpr i // optimize for constant access if (auto* ptr = index.as()) { int64_t offset = ptr->value; - CHECK_EQ(offset % t.lanes(), 0) << "Find unaligned vector load to a vector type"; + ICHECK_EQ(offset % t.lanes(), 0) << "Find unaligned vector load to a vector type"; os << vid << '[' << (offset / t.lanes()) << ']'; return os.str(); } @@ -275,7 +276,7 @@ std::string CodeGenC::GetStructRef(DataType t, const PrimExpr& buffer, const Pri os << ')'; return os.str(); } else { - CHECK_LT(kind, builtin::kTVMValueKindBound_); + ICHECK_LT(kind, builtin::kTVMValueKindBound_); std::ostringstream os; os << "(((TVMValue*)"; this->PrintExpr(buffer, os); @@ -305,7 +306,7 @@ void CodeGenC::RegisterHandleType(const VarNode* buf_var, DataType t) { if (it == handle_data_type_.end()) { handle_data_type_[buf_var] = t; } else { - CHECK(it->second == t) << "conflicting buf var type"; + ICHECK(it->second == t) << "conflicting buf var type"; } } @@ -346,11 +347,11 @@ void CodeGenC::PrintStorageSync(const CallNode* op) { // NOLINT(*) } void CodeGenC::PrintStorageScope(const std::string& scope, std::ostream& os) { // NOLINT(*) - CHECK_EQ(scope, "global"); + ICHECK_EQ(scope, "global"); } void CodeGenC::PrintType(DataType t, std::ostream& os) { // NOLINT(*) - CHECK_EQ(t.lanes(), 1) << "do not yet support vector types"; + ICHECK_EQ(t.lanes(), 1) << "do not yet support vector types"; if (t.is_handle()) { os << "void*"; return; @@ -491,7 +492,7 @@ inline void PrintBinaryIntrinsic(const CallNode* op, const char* opstr, std::ostream& os, // NOLINT(*) CodeGenC* p) { if (op->dtype.lanes() == 1) { - CHECK_EQ(op->args.size(), 2U); + ICHECK_EQ(op->args.size(), 2U); os << '('; p->PrintExpr(op->args[0], os); os << opstr; @@ -576,7 +577,7 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) auto call_op = 
GetRef(ptr_op); if (op->op.same_as(builtin_call_extern_) || op->op.same_as(builtin_call_pure_extern_)) { - CHECK_GE(op->args.size(), 1U); + ICHECK_GE(op->args.size(), 1U); auto func = Downcast(op->args[0]); this->PrintCallExtern(GetType(GetRef(op)), func->value, op->args, true, os); } else if (op_attr_global_symbol_.count(call_op)) { @@ -586,7 +587,7 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) } else if (op->op.same_as(builtin::bitwise_and())) { PrintBinaryIntrinsic(op, " & ", os, this); } else if (op->op.same_as(builtin::large_uint_imm())) { - CHECK_EQ(op->args.size(), 2U); + ICHECK_EQ(op->args.size(), 2U); uint64_t low = static_cast(Downcast(op->args[0])->value); uint64_t high = static_cast(Downcast(op->args[1])->value); uint64_t val = (high << 32U) | low; @@ -596,7 +597,7 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) } else if (op->op.same_as(builtin::bitwise_or())) { PrintBinaryIntrinsic(op, " | ", os, this); } else if (op->op.same_as(builtin::bitwise_not())) { - CHECK_EQ(op->args.size(), 1U); + ICHECK_EQ(op->args.size(), 1U); os << "(~"; this->PrintExpr(op->args[0], os); os << ')'; @@ -614,7 +615,7 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) os << ")"; } else if (op->op.same_as(builtin::address_of())) { const LoadNode* l = op->args[0].as(); - CHECK(op->args.size() == 1 && l); + ICHECK(op->args.size() == 1 && l); os << "(("; this->PrintType(l->dtype.element_of(), os); os << " *)" << this->GetVarID(l->buffer_var.get()) << " + " @@ -625,10 +626,10 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) } os << "))"; } else if (op->op.same_as(builtin::tvm_struct_get())) { - CHECK_EQ(op->args.size(), 3U); + ICHECK_EQ(op->args.size(), 3U); os << GetStructRef(op->dtype, op->args[0], op->args[1], op->args[2].as()->value); } else if (op->op.same_as(builtin::isnullptr())) { - CHECK_EQ(op->args.size(), 1U); + ICHECK_EQ(op->args.size(), 1U); os << "("; this->PrintExpr(op->args[0], os); os << " == NULL)"; @@ -649,7 +650,7 @@ void CodeGenC::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT(*) LOG(FATAL) << "Unresolved call " << op->op; } } else { - CHECK(op->op.as()); + ICHECK(op->op.as()); LOG(FATAL) << "Do not yet support cross function call"; } } @@ -678,7 +679,7 @@ void CodeGenC::VisitExpr_(const LoadNode* op, std::ostream& os) { // NOLINT(*) std::string ref = GetBufferRef(op->dtype, op->buffer_var.get(), op->index); HandleVolatileLoads(ref, op, os); } else { - CHECK(is_one(op->predicate)) << "predicated load is not supported"; + ICHECK(is_one(op->predicate)) << "predicated load is not supported"; arith::PVar base; if (arith::ramp(base, 1, op->dtype.lanes()).Match(op->index)) { @@ -722,7 +723,7 @@ void CodeGenC::VisitStmt_(const StoreNode* op) { this->PrintIndent(); stream << ref << " = " << value << ";\n"; } else { - CHECK(is_one(op->predicate)) << "Predicated store is not supported"; + ICHECK(is_one(op->predicate)) << "Predicated store is not supported"; arith::PVar base; // The assignment below introduces side-effect, and the resulting value cannot @@ -767,7 +768,7 @@ void CodeGenC::VisitStmt_(const StoreNode* op) { void CodeGenC::VisitExpr_(const LetNode* op, std::ostream& os) { // NOLINT(*) auto it = let_binding_.find(op->var); if (it != let_binding_.end()) { - CHECK(deep_equal_(it->second->value, op->value)) + ICHECK(deep_equal_(it->second->value, op->value)) << "Let cannot bind the same var to two different values"; } else { 
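// ---------------------------------------------------------------------------
// The binding discipline enforced by the hunk above (and its CodeGenLLVM and
// CodeGenSPIRV twins): the first visit of a Let records the bound value, and
// any revisit must bind a structurally identical one. Self-contained sketch
// of the pattern; the toy DeepEqual stands in for tir::ExprDeepEqual.
#include <cassert>
#include <string>
#include <unordered_map>

struct Expr {
  std::string repr;
};
bool DeepEqual(const Expr& a, const Expr& b) { return a.repr == b.repr; }

class LetScope {
 public:
  // Returns false instead of aborting when a var is rebound to a different
  // value, mirroring the ICHECK failure above.
  bool Bind(const std::string& var, const Expr& value) {
    auto it = bindings_.find(var);
    if (it != bindings_.end()) return DeepEqual(it->second, value);
    bindings_.emplace(var, value);
    return true;
  }

 private:
  std::unordered_map<std::string, Expr> bindings_;
};

int main() {
  LetScope scope;
  assert(scope.Bind("x", {"a + b"}));   // first binding: recorded
  assert(scope.Bind("x", {"a + b"}));   // identical rebind: accepted
  assert(!scope.Bind("x", {"a * b"}));  // conflicting rebind: rejected
  return 0;
}
// ---------------------------------------------------------------------------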
let_binding_[op->var] = op; @@ -779,7 +780,7 @@ void CodeGenC::VisitExpr_(const LetNode* op, std::ostream& os) { // NOLINT(*) void CodeGenC::VisitExpr_(const RampNode* op, std::ostream& os) { // NOLINT(*) // constraint of current logic - CHECK_EQ(op->base.dtype(), DataType::Int(32)); + ICHECK_EQ(op->base.dtype(), DataType::Int(32)); os << "((int" << op->lanes << ")("; for (int i = 0; i < op->lanes; i++) { os << "(" << PrintExpr(op->base) << ")" @@ -810,7 +811,7 @@ void CodeGenC::VisitExpr_(const SelectNode* op, std::ostream& os) { // NOLINT(* void CodeGenC::VisitStmt_(const LetStmtNode* op) { std::string value = PrintExpr(op->value); if (print_ssa_form_) { - CHECK(!var_idmap_.count(op->var.get())); + ICHECK(!var_idmap_.count(op->var.get())); var_idmap_[op->var.get()] = value; } else { PrintIndent(); @@ -828,12 +829,12 @@ void CodeGenC::VisitStmt_(const LetStmtNode* op) { } void CodeGenC::VisitStmt_(const AllocateNode* op) { - CHECK(!is_zero(op->condition)); + ICHECK(!is_zero(op->condition)); std::string vid = AllocVarID(op->buffer_var.get()); this->PrintIndent(); int32_t constant_size = op->constant_allocation_size(); - CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation for now"; + ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation for now"; const VarNode* buffer = op->buffer_var.as(); std::string scope = alloc_storage_scope_.at(buffer); PrintStorageScope(scope, stream); @@ -854,15 +855,15 @@ void CodeGenC::VisitStmt_(const AttrStmtNode* op) { } } else if (op->attr_key == tir::attr::storage_scope) { const VarNode* v = op->node.as(); - CHECK(v); + ICHECK(v); alloc_storage_scope_[v] = op->value.as()->value; } else if (op->attr_key == tir::attr::volatile_scope) { const VarNode* v = op->node.as(); - CHECK(v); + ICHECK(v); volatile_buf_.insert(v); } else if (op->attr_key == tir::attr::pragma_import_c) { const StringImmNode* value = op->value.as(); - CHECK(value != nullptr); + ICHECK(value != nullptr); decl_stream << value->value; } this->PrintStmt(op->body); @@ -873,7 +874,7 @@ void CodeGenC::VisitStmt_(const AssertStmtNode* op) { PrintIndent(); if (const auto* str = op->message.as()) { // GLOG style check - stream << "CHECK(" << cond << ") << \"" << str->value << "\";\n"; + stream << "ICHECK(" << cond << ") << \"" << str->value << "\";\n"; } else { stream << "assert(" << cond << ");\n"; } @@ -884,7 +885,7 @@ void CodeGenC::VisitStmt_(const ForNode* op) { std::string extent = PrintExpr(op->extent); PrintIndent(); std::string vid = AllocVarID(op->loop_var.get()); - CHECK(is_zero(op->min)); + ICHECK(is_zero(op->min)); stream << "for ("; PrintType(op->loop_var.dtype(), stream); stream << ' ' << vid << " = 0; " << vid << " < " << extent << "; ++" << vid << ") {\n"; @@ -932,7 +933,7 @@ void CodeGenC::VisitStmt_(const EvaluateNode* op) { this->PrintStorageSync(call); return; } else if (call->op.same_as(builtin::tvm_struct_set())) { - CHECK_EQ(call->args.size(), 4); + ICHECK_EQ(call->args.size(), 4); std::string value = PrintExpr(call->args[3]); std::string ref = GetStructRef(call->args[3].dtype(), call->args[0], call->args[1], call->args[2].as()->value); @@ -949,7 +950,7 @@ void CodeGenC::VisitStmt_(const EvaluateNode* op) { } void CodeGenC::PrintVecElemLoadExpr(DataType t, int i, const std::string& value, std::ostream& os) { - CHECK_GT(t.lanes(), 1); + ICHECK_GT(t.lanes(), 1); if (t.bits() == 8 && (t.is_int() || t.is_uint())) { if (i != 0) { os << "|"; diff --git a/src/target/source/codegen_c_host.cc b/src/target/source/codegen_c_host.cc index 
dc93c31e7024..310dab41215b 100644 --- a/src/target/source/codegen_c_host.cc +++ b/src/target/source/codegen_c_host.cc @@ -49,7 +49,7 @@ void CodeGenCHost::Init(bool output_ssa, bool emit_asserts) { void CodeGenCHost::AddFunction(const PrimFunc& f) { auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenCHost: Expect PrimFunc to have the global_symbol attribute"; function_names_.emplace_back(global_symbol.value()); @@ -71,7 +71,7 @@ void CodeGenCHost::PrintFinalReturn() { // NOLINT(*) void CodeGenCHost::PrintType(DataType t, std::ostream& os) { // NOLINT(*) int lanes = t.lanes(); if (t.is_handle()) { - CHECK_EQ(lanes, 1) << "does not support vector types"; + ICHECK_EQ(lanes, 1) << "does not support vector types"; os << "void*"; return; } @@ -192,7 +192,7 @@ void CodeGenCHost::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT std::string stack_name = GetUniqueName("stack"); const std::string& type = op->args[0].as()->value; const IntImmNode* num = op->args[1].as(); - CHECK(num != nullptr); + ICHECK(num != nullptr); static_assert(alignof(TVMValue) % alignof(DLTensor) == 0, "invariant"); size_t unit = sizeof(TVMValue); size_t size = 0; @@ -212,18 +212,18 @@ void CodeGenCHost::VisitExpr_(const CallNode* op, std::ostream& os) { // NOLINT os << stack_name; } else if (op->op.same_as(builtin::tvm_call_packed_lowered())) { const StringImmNode* s = op->args[0].as(); - CHECK(s != nullptr) << "tvm_call_packed_lowered expects first argument as function name"; + ICHECK(s != nullptr) << "tvm_call_packed_lowered expects first argument as function name"; int64_t begin = op->args[3].as()->value; int64_t end = op->args[4].as()->value; int64_t num_args = end - begin; - CHECK_GE(num_args, 0); + ICHECK_GE(num_args, 0); std::string func_name = s->value; // NOTE: cannot rely on GetUnique for global decl_stream declarations // because it is reset between AddFunction(). std::string packed_func_name = func_name + "_packed"; if (declared_globals_.insert(packed_func_name).second) { // Still reserve the name among unique names. 
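// ---------------------------------------------------------------------------
// Context for the declared_globals_ bookkeeping in this hunk: for each packed
// symbol, the host codegen emits one lazily-resolved static handle plus a
// TVMFuncCall through it. The sketch below hand-writes that shape; the symbol
// "fused_add" and the wrapper name are invented for illustration, not what
// the emitter actually prints.
#include <tvm/runtime/c_backend_api.h>
#include <tvm/runtime/c_runtime_api.h>

static void* fused_add_packed = NULL;  // one static handle per unique symbol

static int32_t CallFusedAdd(TVMValue* values, int* type_codes, int num_args,
                            void* resource_handle) {
  TVMValue ret_val;
  int ret_tcode;
  if (fused_add_packed == NULL) {  // resolve on first use, then cache
    if (TVMBackendGetFuncFromEnv(resource_handle, "fused_add", &fused_add_packed) != 0) {
      return -1;
    }
  }
  return TVMFuncCall(fused_add_packed, values, type_codes, num_args, &ret_val, &ret_tcode);
}
// ---------------------------------------------------------------------------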
- CHECK(GetUniqueName(packed_func_name) == packed_func_name) + ICHECK(GetUniqueName(packed_func_name) == packed_func_name) << "Expected name " << packed_func_name << " to not be taken"; decl_stream << "static void* " << packed_func_name << " = NULL;\n"; } @@ -307,13 +307,13 @@ runtime::Module BuildCHost(IRModule mod, Target target) { cg.Init(output_ssa, emit_asserts); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodegenCHost: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodegenCHost: Can only take PrimFunc"; auto f = Downcast(kv.second); cg.AddFunction(f); } if (target->GetAttr("system-lib").value_or(Bool(false))) { - CHECK_EQ(target->GetAttr("runtime").value_or(""), "c") + ICHECK_EQ(target->GetAttr("runtime").value_or(""), "c") << "c target only supports generating C runtime SystemLibs"; cg.GenerateFuncRegistry(); cg.GenerateCrtSystemLib(); diff --git a/src/target/source/codegen_cuda.cc b/src/target/source/codegen_cuda.cc index d57efa007272..51fcbb633de7 100644 --- a/src/target/source/codegen_cuda.cc +++ b/src/target/source/codegen_cuda.cc @@ -41,7 +41,7 @@ void CodeGenCUDA::Init(bool output_ssa) { CodeGenC::Init(output_ssa); vid_global_barrier_state_ = GetUniqueName(runtime::symbol::tvm_global_barrier_state); vid_global_barrier_expect_ = GetUniqueName("__barrier_expect"); - CHECK_EQ(vid_global_barrier_state_, runtime::symbol::tvm_global_barrier_state); + ICHECK_EQ(vid_global_barrier_state_, runtime::symbol::tvm_global_barrier_state); } void CodeGenCUDA::PrintFuncPrefix() { stream << "extern \"C\" __global__ void"; } @@ -83,7 +83,7 @@ std::string CodeGenCUDA::Finish() { } void CodeGenCUDA::VisitStmt_(const tir::ForNode* op) { - CHECK(is_const_int(op->min, 0)); + ICHECK(is_const_int(op->min, 0)); if (op->for_type == tir::ForType::Unrolled) { PrintIndent(); stream << "#pragma unroll\n"; @@ -92,14 +92,14 @@ void CodeGenCUDA::VisitStmt_(const tir::ForNode* op) { } void CodeGenCUDA::BindThreadIndex(const IterVar& iv) { - CHECK(!var_idmap_.count(iv->var.get())); + ICHECK(!var_idmap_.count(iv->var.get())); var_idmap_[iv->var.get()] = CastFromTo(iv->thread_tag, DataType::UInt(32), iv->var.dtype()); } void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*) int lanes = t.lanes(); if (t.is_handle()) { - CHECK_EQ(lanes, 1) << "do not yet support vector types"; + ICHECK_EQ(lanes, 1) << "do not yet support vector types"; os << "void*"; return; } @@ -120,7 +120,7 @@ void CodeGenCUDA::PrintType(DataType t, std::ostream& os) { // NOLINT(*) // h4.z is emitted as *(half2*)(&(u2.y)).x // h4.w is emitted as *(half2*)(&(u2.y)).y // - CHECK_EQ(lanes % 2, 0) << "only support even lane for half type"; + ICHECK_EQ(lanes % 2, 0) << "only support even lane for half type"; os << "uint" << lanes / 2; } else { fail = true; @@ -308,7 +308,7 @@ void CodeGenCUDA::PrintVecElemLoad(const std::string& vec, DataType t, int i, } static const char access[] = {'x', 'y', 'z', 'w'}; - CHECK(i >= 0 && i < (t.is_float16() ? 8 : 4)); + ICHECK(i >= 0 && i < (t.is_float16() ? 8 : 4)); if ((t.is_int()) && t.bits() == 8) { if (t.lanes() == 2 || t.lanes() == 3) { os << vec << "." << access[i % t.lanes()]; @@ -332,7 +332,7 @@ void CodeGenCUDA::PrintVecElemStore(const std::string& vec, DataType t, int i, const std::string& value) { this->PrintIndent(); static const char access[] = {'x', 'y', 'z', 'w'}; - CHECK(i >= 0 && i < (t.is_float16() ? 8 : 4)); + ICHECK(i >= 0 && i < (t.is_float16() ? 
8 : 4)); if (t.bits() == 8 && (t.is_int() || t.is_uint())) { if (t.lanes() == 2 || t.lanes() == 3) { stream << vec << '.' << access[i % t.lanes()] << "=" @@ -394,8 +394,8 @@ void CodeGenCUDA::PrintStorageSync(const CallNode* op) { } void CodeGenCUDA::PrintStorageScope(const std::string& scope, std::ostream& os) { // NOLINT(*) - CHECK_NE(scope, "global") << "Cannot allocate global memory when targeting CUDA. You must pass " - "all global arrays as input instead"; + ICHECK_NE(scope, "global") << "Cannot allocate global memory when targeting CUDA. You must pass " + "all global arrays as input instead"; if (scope == "shared") { os << "__shared__ "; } @@ -404,7 +404,7 @@ void CodeGenCUDA::PrintStorageScope(const std::string& scope, std::ostream& os) void CodeGenCUDA::VisitExpr_(const CastNode* op, std::ostream& os) { DataType from_ty = op->value.dtype(); DataType target_ty = op->dtype; - CHECK_EQ(target_ty.lanes(), from_ty.lanes()); + ICHECK_EQ(target_ty.lanes(), from_ty.lanes()); // Emit simple C-style type conversion. if (from_ty.is_scalar()) return CodeGenC::VisitExpr_(op, os); @@ -496,7 +496,7 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) { if (op->op.same_as(builtin::tvm_fill_fragment())) { need_mma_h_ = true; - CHECK_EQ(op->args.size(), 6U); + ICHECK_EQ(op->args.size(), 6U); os << "nvcuda::wmma::fill_fragment("; this->PrintExpr(op->args[0], os); os << "["; @@ -506,7 +506,7 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) { os << ")"; } else if (op->op.same_as(builtin::tvm_load_matrix_sync())) { need_mma_h_ = true; - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); os << "nvcuda::wmma::load_matrix_sync("; this->PrintExpr(op->args[0], os); os << "["; @@ -518,7 +518,7 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) { os << ")"; } else if (op->op.same_as(builtin::tvm_store_matrix_sync())) { need_mma_h_ = true; - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); os << "nvcuda::wmma::store_matrix_sync("; this->PrintExpr(op->args[5], os); os << ", "; @@ -535,7 +535,7 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) { os << ")"; } else if (op->op.same_as(builtin::tvm_mma_sync())) { need_mma_h_ = true; - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); os << "nvcuda::wmma::mma_sync("; for (int i = 0; i < 4; ++i) { this->PrintExpr(op->args[i * 2], os); @@ -545,7 +545,7 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op, std::ostream& os) { } } else if (op->op.same_as(builtin::tvm_bmma_sync())) { need_mma_h_ = true; - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); os << "nvcuda::wmma::bmma_sync("; for (int i = 0; i < 4; ++i) { this->PrintExpr(op->args[i * 2], os); @@ -572,24 +572,24 @@ void CodeGenCUDA::VisitStmt_(const AttrStmtNode* op) { } void CodeGenCUDA::VisitStmt_(const AllocateNode* op) { - CHECK(!is_zero(op->condition)); + ICHECK(!is_zero(op->condition)); std::string vid = AllocVarID(op->buffer_var.get()); this->PrintIndent(); int32_t constant_size = op->constant_allocation_size(); - CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation for now"; + ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation for now"; const VarNode* buffer = op->buffer_var.as(); std::string scope = alloc_storage_scope_.at(buffer); if (scope.find("wmma.") == 0) { if (scope == "wmma.matrix_a" || scope == "wmma.matrix_b") { - CHECK(op->dtype == DataType::Float(16) || op->dtype == DataType::Int(8) || - op->dtype == 
DataType::UInt(8) || op->dtype == DataType::Int(4) || - op->dtype == DataType::UInt(4) || op->dtype == DataType::Int(1)) + ICHECK(op->dtype == DataType::Float(16) || op->dtype == DataType::Int(8) || + op->dtype == DataType::UInt(8) || op->dtype == DataType::Int(4) || + op->dtype == DataType::UInt(4) || op->dtype == DataType::Int(1)) << "Matrix_a and matrix_b only support half or char or unsigned char " << "or uint4 or int4 or int1 type for now"; } else { - CHECK(op->dtype == DataType::Float(16) || op->dtype == DataType::Float(32) || - op->dtype == DataType::Int(32)) + ICHECK(op->dtype == DataType::Float(16) || op->dtype == DataType::Float(32) || + op->dtype == DataType::Int(32)) << "Accumulator only support half, float and int type for now"; } constant_size = GetWmmaFragmentSize(scope, buffer, constant_size); @@ -640,7 +640,7 @@ void CodeGenCUDA::VisitExpr_(const BroadcastNode* op, std::ostream& os) { // NO if ((op->dtype.is_int() || op->dtype.is_uint()) && op->dtype.bits() == 8 && op->lanes == 4) { // make_int8x4 const int64_t* p = as_const_int(op->value); - CHECK(p); + ICHECK(p); int64_t v = *p & 0xFF; v = (v << 24) | (v << 16) | (v << 8) | v; if (op->dtype.is_uint()) { @@ -678,7 +678,7 @@ void CodeGenCUDA::VisitExpr_(const BroadcastNode* op, std::ostream& os) { // NO void CodeGenCUDA::VisitExpr_(const ShuffleNode* op, std::ostream& os) { std::vector to_shuffle(op->vectors.size()); for (int i = 0, e = op->vectors.size(); i < e; ++i) { - CHECK(op->vectors[i].dtype().lanes() == 1) << "Only scalars can be shuffled in CUDA!"; + ICHECK(op->vectors[i].dtype().lanes() == 1) << "Only scalars can be shuffled in CUDA!"; to_shuffle[i] = PrintExpr(op->vectors[i]); } os << "make_"; @@ -686,7 +686,7 @@ void CodeGenCUDA::VisitExpr_(const ShuffleNode* op, std::ostream& os) { os << '('; for (int i = 0, e = op->indices.size(); i < e; ++i) { const int64_t* val = as_const_int(op->indices[i]); - CHECK(val && *val >= 0 && (int)*val < (int)to_shuffle.size()); + ICHECK(val && *val >= 0 && (int)*val < (int)to_shuffle.size()); if (i != 0) os << ", "; os << to_shuffle[*val]; } @@ -701,8 +701,8 @@ void CodeGenCUDA::VisitExpr_(const SelectNode* op, std::ostream& os) { } // Codegen vector condition case by serializing the select op. - CHECK(op->false_value->dtype == op->dtype && op->true_value->dtype == op->dtype && - op->dtype.lanes() == op->condition.dtype().lanes()); + ICHECK(op->false_value->dtype == op->dtype && op->true_value->dtype == op->dtype && + op->dtype.lanes() == op->condition.dtype().lanes()); std::string r_var = GetUniqueName("_"); this->PrintIndent(); @@ -846,7 +846,7 @@ void CodeGenCUDA::HandleVolatileLoads(const std::string& value, const LoadNode* void CodeGenCUDA::PrintVecElemLoadExpr(DataType t, int i, const std::string& value, std::ostream& os) { - CHECK_GT(t.lanes(), 1); + ICHECK_GT(t.lanes(), 1); if (t.bits() == 8 && (t.is_int() || t.is_uint())) { if (!(t.lanes() == 2 || t.lanes() == 3)) { if (i != 0) { diff --git a/src/target/source/codegen_metal.cc b/src/target/source/codegen_metal.cc index fb235d2d785d..7b69e8fbb903 100644 --- a/src/target/source/codegen_metal.cc +++ b/src/target/source/codegen_metal.cc @@ -59,7 +59,8 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { // add to alloc buffer type. auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) << "CodeGenC: Expect PrimFunc to have the global_symbol attribute"; + ICHECK(global_symbol.defined()) + << "CodeGenC: Expect PrimFunc to have the global_symbol attribute"; // Function header. 
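// ---------------------------------------------------------------------------
// Shape of the signature produced by the stream writes below: every DLTensor
// argument binds to a [[ buffer(n) ]] slot, POD params travel in the packed
// arg_buf_type struct, and the thread-index builtins come last. Hand-written
// Metal example (kernel name, buffer count, and group size are invented):
#include <metal_stdlib>
using namespace metal;

kernel void myadd(device float* A [[ buffer(0) ]],
                  device float* B [[ buffer(1) ]],
                  uint threadIdx [[ thread_position_in_threadgroup ]],
                  uint blockIdx [[ threadgroup_position_in_grid ]]) {
  uint i = blockIdx * 256 + threadIdx;  // assumes a 256-thread group
  B[i] = A[i] + 1.0f;
}
// ---------------------------------------------------------------------------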
this->stream << "kernel void " << static_cast(global_symbol.value()) << "("; @@ -97,7 +98,7 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { decl_stream << "struct " << arg_buf_type << " {\n"; for (size_t i = num_buffer; i < f->params.size(); ++i) { Var v = f->params[i]; - CHECK(!v.dtype().is_handle()); + ICHECK(!v.dtype().is_handle()); std::string vid = AllocVarID(v.get()); std::ostringstream vref; if (v.dtype().bits() == 32) { @@ -116,8 +117,8 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { decl_stream << "};\n\n"; } // Setup the thread group info. - CHECK_EQ(GetUniqueName("threadIdx"), "threadIdx"); - CHECK_EQ(GetUniqueName("blockIdx"), "blockIdx"); + ICHECK_EQ(GetUniqueName("threadIdx"), "threadIdx"); + ICHECK_EQ(GetUniqueName("blockIdx"), "blockIdx"); int work_dim = 0; auto thread_axis = f->GetAttr>(tir::attr::kDeviceThreadAxis).value(); @@ -136,7 +137,7 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { } // bind thread axis for (IterVar iv : thread_axis) { - CHECK(!var_idmap_.count(iv->var.get())); + ICHECK(!var_idmap_.count(iv->var.get())); std::string vname = iv->thread_tag; if (work_dim <= 1) { vname = vname.substr(0, iv->thread_tag.length() - 2); @@ -154,7 +155,7 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { } void CodeGenMetal::BindThreadIndex(const IterVar& iv) { - CHECK(!var_idmap_.count(iv->var.get())); + ICHECK(!var_idmap_.count(iv->var.get())); var_idmap_[iv->var.get()] = CastFromTo(iv->thread_tag, DataType::UInt(thread_index_bits_), iv->var.dtype()); } @@ -162,7 +163,7 @@ void CodeGenMetal::BindThreadIndex(const IterVar& iv) { void CodeGenMetal::PrintType(DataType t, std::ostream& os) { // NOLINT(*) int lanes = t.lanes(); if (t.is_handle()) { - CHECK_EQ(lanes, 1) << "do not yet support vector types"; + ICHECK_EQ(lanes, 1) << "do not yet support vector types"; os << "void*"; return; } @@ -289,10 +290,10 @@ runtime::Module BuildMetal(IRModule mod, Target target) { cg.Init(output_ssa); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodeGenMetal: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodeGenMetal: Can only take PrimFunc"; auto f = Downcast(kv.second); auto calling_conv = f->GetAttr(tvm::attr::kCallingConv); - CHECK(calling_conv == CallingConv::kDeviceKernelLaunch) + ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch) << "CodeGenMetal: expect calling_conv equals CallingConv::kDeviceKernelLaunch"; cg.AddFunction(f); } diff --git a/src/target/source/codegen_opencl.cc b/src/target/source/codegen_opencl.cc index 10cc007c4572..0f79df37701c 100644 --- a/src/target/source/codegen_opencl.cc +++ b/src/target/source/codegen_opencl.cc @@ -79,7 +79,7 @@ std::string CodeGenOpenCL::Finish() { } void CodeGenOpenCL::BindThreadIndex(const IterVar& iv) { - CHECK(!var_idmap_.count(iv->var.get())); + ICHECK(!var_idmap_.count(iv->var.get())); runtime::ThreadScope ts = runtime::ThreadScope::Create(iv->thread_tag); std::ostringstream os; if (ts.rank == 1) { @@ -93,7 +93,7 @@ void CodeGenOpenCL::BindThreadIndex(const IterVar& iv) { void CodeGenOpenCL::PrintType(DataType t, std::ostream& os) { // NOLINT(*) int lanes = t.lanes(); if (t.is_handle()) { - CHECK_EQ(lanes, 1) << "do not yet support vector types"; + ICHECK_EQ(lanes, 1) << "do not yet support vector types"; os << "void*"; return; } @@ -233,7 +233,7 @@ void CodeGenOpenCL::VisitExpr_(const CallNode* op, std::ostream& os) { if (op->op.same_as(builtin::address_of())) { // Overload tvm_address_of to add storage scope (e.g. __global). 
const LoadNode* load = op->args[0].as(); - CHECK(op->args.size() == 1 && load); + ICHECK(op->args.size() == 1 && load); os << "(("; auto it = alloc_storage_scope_.find(load->buffer_var.get()); if (it != alloc_storage_scope_.end()) { @@ -287,10 +287,10 @@ runtime::Module BuildOpenCL(IRModule mod, Target target) { cg.Init(output_ssa); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodeGenOpenCL: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodeGenOpenCL: Can only take PrimFunc"; auto f = Downcast(kv.second); auto calling_conv = f->GetAttr(tvm::attr::kCallingConv); - CHECK(calling_conv == CallingConv::kDeviceKernelLaunch) + ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch) << "CodeGenOpenCL: expect calling_conv equals CallingConv::kDeviceKernelLaunch"; cg.AddFunction(f); } diff --git a/src/target/source/codegen_source_base.cc b/src/target/source/codegen_source_base.cc index 9b2f0345864f..9f0cf9a70b61 100644 --- a/src/target/source/codegen_source_base.cc +++ b/src/target/source/codegen_source_base.cc @@ -70,7 +70,7 @@ std::string CodeGenSourceBase::SSAGetID(std::string src, DataType t) { } std::string CodeGenSourceBase::AllocVarID(const tir::VarNode* v) { - CHECK(!var_idmap_.count(v)) << "Need input to be in SSA form dup " << v->name_hint; + ICHECK(!var_idmap_.count(v)) << "Need input to be in SSA form dup " << v->name_hint; std::string key = v->name_hint; std::string vid = GetUniqueName(key); var_idmap_[v] = vid; @@ -79,7 +79,7 @@ std::string CodeGenSourceBase::AllocVarID(const tir::VarNode* v) { std::string CodeGenSourceBase::GetVarID(const tir::VarNode* v) const { auto it = var_idmap_.find(v); - CHECK(it != var_idmap_.end()) << "Find undefined Variable " << v->name_hint; + ICHECK(it != var_idmap_.end()) << "Find undefined Variable " << v->name_hint; return it->second; } @@ -97,7 +97,7 @@ void CodeGenSourceBase::MarkConst(std::string vid) { e.scope_id = 0; ssa_assign_map_[vid] = e; } else { - CHECK_EQ(it->second.vid, vid); + ICHECK_EQ(it->second.vid, vid); } } diff --git a/src/target/source/codegen_vhls.cc b/src/target/source/codegen_vhls.cc index 9401f0682db8..9896d8b833f9 100644 --- a/src/target/source/codegen_vhls.cc +++ b/src/target/source/codegen_vhls.cc @@ -146,10 +146,10 @@ runtime::Module BuildSDAccel(IRModule mod, Target target) { cg.Init(output_ssa); for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodeGenVHLS: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodeGenVHLS: Can only take PrimFunc"; auto f = Downcast(kv.second); auto calling_conv = f->GetAttr(tvm::attr::kCallingConv); - CHECK(calling_conv == CallingConv::kDeviceKernelLaunch) + ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch) << "CodeGenVLHS: expect calling_conv equals CallingConv::kDeviceKernelLaunch"; cg.AddFunction(f); } @@ -160,7 +160,7 @@ runtime::Module BuildSDAccel(IRModule mod, Target target) { Array > kernel_info; for (auto kv : mod->functions) { - CHECK(kv.second->IsInstance()) << "CodeGenOpenCL: Can only take PrimFunc"; + ICHECK(kv.second->IsInstance()) << "CodeGenOpenCL: Can only take PrimFunc"; auto f = Downcast(kv.second); CodeGenVivadoHLS cg; cg.Init(output_ssa); @@ -171,7 +171,7 @@ runtime::Module BuildSDAccel(IRModule mod, Target target) { } auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "CodeGenC: Expect PrimFunc to have the global_symbol attribute"; kernel_info.push_back({global_symbol.value(), code}); } diff --git 
a/src/target/source/intrin_rule_cuda.cc b/src/target/source/intrin_rule_cuda.cc index 9ffceb68e278..0a68736bcd05 100644 --- a/src/target/source/intrin_rule_cuda.cc +++ b/src/target/source/intrin_rule_cuda.cc @@ -102,7 +102,7 @@ struct CUDAWarpIntrinsic { } else if (orig_op.same_as(builtin::tvm_warp_shuffle_up())) { return Op::Get("tir.cuda.__shfl_up_sync"); } else { - CHECK(orig_op.same_as(builtin::tvm_warp_shuffle_down())); + ICHECK(orig_op.same_as(builtin::tvm_warp_shuffle_down())); return Op::Get("tir.cuda.__shfl_down_sync"); } } @@ -117,8 +117,8 @@ template static void DispatchCUDAShuffle(const TVMArgs& args, TVMRetValue* rv) { PrimExpr e = args[0]; const CallNode* call = e.as(); - CHECK(call != nullptr); - CHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size + ICHECK(call != nullptr); + ICHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size Array cuda_args{{call->args[0], call->args[1], call->args[2], call->args[3]}}; *rv = Call(call->dtype, T()(call->dtype, Downcast(call->op)), cuda_args); diff --git a/src/target/source/intrin_rule_opencl.cc b/src/target/source/intrin_rule_opencl.cc index 7f81e335ec8d..54da5c74ab02 100644 --- a/src/target/source/intrin_rule_opencl.cc +++ b/src/target/source/intrin_rule_opencl.cc @@ -74,10 +74,10 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.opencl.cosh").set_body(DispatchPureExtern(); - CHECK(call != nullptr); - CHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size + ICHECK(call != nullptr); + ICHECK_EQ(call->args.size(), 5); // mask, value, warp_id, width, warp_size arith::Analyzer analyzer; - CHECK(analyzer.CanProve(call->args[3] == call->args[4])) + ICHECK(analyzer.CanProve(call->args[3] == call->args[4])) << "Intel warp shuffle dose not support width != warp_size"; Array opencl_args{{StringImm("intel_sub_group_shuffle"), call->args[1], call->args[2]}}; *rv = Call(call->dtype, builtin::call_pure_extern(), opencl_args); diff --git a/src/target/source/source_module.cc b/src/target/source/source_module.cc index e1ee1539d986..3be658aa0125 100644 --- a/src/target/source/source_module.cc +++ b/src/target/source/source_module.cc @@ -67,7 +67,7 @@ runtime::Module CreateMetadataModule( for (size_t i = 0; i < variables.size(); i++) { arrays.push_back(variables[i].operator std::string()); } - CHECK_EQ(sym_metadata.count(symbol), 0U) << "Found duplicated symbol: " << symbol; + ICHECK_EQ(sym_metadata.count(symbol), 0U) << "Found duplicated symbol: " << symbol; sym_metadata[symbol] = arrays; } } @@ -132,10 +132,10 @@ class CSourceModuleNode : public runtime::ModuleNode { std::string fmt = GetFileFormat(file_name, format); std::string meta_file = GetMetaFilePath(file_name); if (fmt == "cc") { - CHECK_NE(code_.length(), 0); + ICHECK_NE(code_.length(), 0); SaveBinaryToFile(file_name, code_); } else { - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; } } @@ -179,7 +179,7 @@ class DeviceSourceModuleNode final : public runtime::ModuleNode { void SaveToFile(const std::string& file_name, const std::string& format) final { std::string fmt = GetFileFormat(file_name, format); - CHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; + ICHECK_EQ(fmt, fmt_) << "Can only save to format=" << fmt_; std::string meta_file = GetMetaFilePath(file_name); SaveMetaDataToFile(meta_file, fmap_); SaveBinaryToFile(file_name, data_); diff --git a/src/target/spirv/build_vulkan.cc b/src/target/spirv/build_vulkan.cc index 1eef2f8f88e5..a0f0b76eefbd 100644 --- 
diff --git a/src/target/spirv/build_vulkan.cc b/src/target/spirv/build_vulkan.cc
index 1eef2f8f88e5..a0f0b76eefbd 100644
--- a/src/target/spirv/build_vulkan.cc
+++ b/src/target/spirv/build_vulkan.cc
@@ -49,10 +49,10 @@ class SPIRVTools {
                       SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT,
                       &text, &diagnostic);
-    CHECK_EQ(res, SPV_SUCCESS) << " line=" << diagnostic->position.line
-                               << " column=" << diagnostic->position.column
-                               << " index=" << diagnostic->position.index
-                               << " error:" << diagnostic->error;
+    ICHECK_EQ(res, SPV_SUCCESS) << " line=" << diagnostic->position.line
+                                << " column=" << diagnostic->position.column
+                                << " index=" << diagnostic->position.index
+                                << " error:" << diagnostic->error;
     std::string ret(text->str);
     spvTextDestroy(text);
@@ -78,13 +78,13 @@ runtime::Module BuildSPIRV(IRModule mod, Target target, bool webgpu_restriction)
   CodeGenSPIRV cg;
   for (auto kv : mod->functions) {
-    CHECK(kv.second->IsInstance<PrimFuncNode>()) << "CodeGenSPIRV: Can only take PrimFunc";
+    ICHECK(kv.second->IsInstance<PrimFuncNode>()) << "CodeGenSPIRV: Can only take PrimFunc";
     auto f = Downcast<PrimFunc>(kv.second);
     auto calling_conv = f->GetAttr(tvm::attr::kCallingConv);
-    CHECK(calling_conv == CallingConv::kDeviceKernelLaunch)
+    ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch)
         << "CodeGenSPIRV: expect calling_conv equals CallingConv::kDeviceKernelLaunch";
     auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol);
-    CHECK(global_symbol.defined())
+    ICHECK(global_symbol.defined())
         << "CodeGenSPIRV: Expect PrimFunc to have the global_symbol attribute";
     std::string f_name = global_symbol.value();
@@ -95,7 +95,7 @@ runtime::Module BuildSPIRV(IRModule mod, Target target, bool webgpu_restriction)
     if (webgpu_restriction) {
       for (auto param : f->params) {
-        CHECK(param.dtype().is_handle()) << "WebGPU does not yet support non-buffer arguments";
+        ICHECK(param.dtype().is_handle()) << "WebGPU does not yet support non-buffer arguments";
       }
     }
@@ -104,7 +104,7 @@ runtime::Module BuildSPIRV(IRModule mod, Target target, bool webgpu_restriction)
     arr.data = reinterpret_cast(dmlc::BeginPtr(shader.data));
     arr.size = shader.data.size() * sizeof(uint32_t);
     std::string transformed = (*postproc)(arr);
-    CHECK_EQ(transformed.length() % 4U, 0U);
+    ICHECK_EQ(transformed.length() % 4U, 0U);
     shader.data.resize(transformed.size() / 4U);
     std::copy(transformed.begin(), transformed.end(),
               reinterpret_cast(dmlc::BeginPtr(shader.data)));
diff --git a/src/target/spirv/codegen_spirv.cc b/src/target/spirv/codegen_spirv.cc
index 2a67d953f960..c3b12ab943c6 100644
--- a/src/target/spirv/codegen_spirv.cc
+++ b/src/target/spirv/codegen_spirv.cc
@@ -35,7 +35,7 @@ namespace codegen {
 std::vector<uint32_t> CodeGenSPIRV::BuildFunction(const PrimFunc& f, const std::string& name) {
   this->InitFuncState();
-  CHECK(f->HasNonzeroAttr(tir::attr::kNoAlias)) << "SPIRV only takes restricted memory model";
+  ICHECK(f->HasNonzeroAttr(tir::attr::kNoAlias)) << "SPIRV only takes restricted memory model";
   std::vector<Var> pod_args;
   uint32_t num_buffer = 0;
@@ -44,7 +44,7 @@ std::vector<uint32_t> CodeGenSPIRV::BuildFunction(const PrimFunc& f, const std::
     if (t.is_handle()) {
       if (auto* ptr = arg->type_annotation.as<PointerTypeNode>()) {
         auto* prim = ptr->element_type.as<PrimTypeNode>();
-        CHECK(prim);
+        ICHECK(prim);
         DataType value_type = prim->dtype;
         spirv::Value arg_value =
             builder_->BufferArgument(builder_->GetSType(value_type), 0, num_buffer);
@@ -98,9 +98,9 @@ spirv::Value CodeGenSPIRV::GetThreadIndex(const IterVar& iv, const PrimExpr& ext
   if (ts.rank == 1) {
     v = builder_->GetLocalID(ts.dim_index);
     auto* sizeptr = extent.as<IntImmNode>();
-    CHECK(sizeptr) << "SPIRV only allows constant thread group size "
-                   << " get " << extent;
-    CHECK_LT(ts.dim_index, 3);
+    ICHECK(sizeptr) << "SPIRV only allows constant thread group size "
+                    << " get " << extent;
+    ICHECK_LT(ts.dim_index, 3);
     workgroup_size_[ts.dim_index] = static_cast(sizeptr->value);
   } else {
     v = builder_->GetWorkgroupID(ts.dim_index);
@@ -130,7 +130,7 @@ spirv::Value CodeGenSPIRV::CreateStorageSync(const CallNode* op) {
 spirv::Value CodeGenSPIRV::VisitExpr_(const VarNode* op) {
   auto it = var_map_.find(op);
-  CHECK(it != var_map_.end()) << "cannot find variable " << op->name_hint;
+  ICHECK(it != var_map_.end()) << "cannot find variable " << op->name_hint;
   return it->second;
 }
@@ -232,7 +232,7 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const SelectNode* op) {
 spirv::Value CodeGenSPIRV::VisitExpr_(const LetNode* op) {
   auto it = let_binding_.find(op->var);
   if (it != let_binding_.end()) {
-    CHECK(deep_equal_(it->second->value, op->value))
+    ICHECK(deep_equal_(it->second->value, op->value))
         << "Let cannot bind the same var to two different values";
   } else {
     let_binding_[op->var] = op;
@@ -244,7 +244,7 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const LetNode* op) {
 spirv::Value CodeGenSPIRV::VisitExpr_(const CallNode* op) {
   if (op->op.same_as(builtin::call_spirv_pure_glsl450())) {
-    CHECK_GE(op->args.size(), 2U);
+    ICHECK_GE(op->args.size(), 2U);
     uint32_t inst_id = static_cast<uint32_t>(op->args[0].as<IntImmNode>()->value);
     std::vector<spirv::Value> values;
     for (size_t i = 1; i < op->args.size(); ++i) {
@@ -252,31 +252,31 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const CallNode* op) {
     }
     return builder_->CallGLSL450(builder_->GetSType(op->dtype), inst_id, values);
   } else if (op->op.same_as(builtin::bitwise_and())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     spirv::Value a = MakeValue(op->args[0]);
     spirv::Value b = MakeValue(op->args[1]);
     return builder_->MakeValue(spv::OpBitwiseAnd, a.stype, a, b);
   } else if (op->op.same_as(builtin::bitwise_xor())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     spirv::Value a = MakeValue(op->args[0]);
     spirv::Value b = MakeValue(op->args[1]);
     return builder_->MakeValue(spv::OpBitwiseXor, a.stype, a, b);
   } else if (op->op.same_as(builtin::bitwise_or())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     spirv::Value a = MakeValue(op->args[0]);
     spirv::Value b = MakeValue(op->args[1]);
     return builder_->MakeValue(spv::OpBitwiseOr, a.stype, a, b);
   } else if (op->op.same_as(builtin::bitwise_not())) {
-    CHECK_EQ(op->args.size(), 1U);
+    ICHECK_EQ(op->args.size(), 1U);
     spirv::Value a = MakeValue(op->args[0]);
     return builder_->MakeValue(spv::OpNot, a.stype, a);
   } else if (op->op.same_as(builtin::shift_left())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     spirv::Value a = MakeValue(op->args[0]);
     spirv::Value b = MakeValue(op->args[1]);
     return builder_->MakeValue(spv::OpShiftLeftLogical, a.stype, a, b);
   } else if (op->op.same_as(builtin::shift_right())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     spirv::Value a = MakeValue(op->args[0]);
     spirv::Value b = MakeValue(op->args[1]);
     if (op->args[0].dtype().is_int()) {
@@ -288,7 +288,7 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const CallNode* op) {
     return builder_->MakeValue(spv::OpBitcast, builder_->GetSType(op->dtype),
                                MakeValue(op->args[0]));
   } else if (op->op.same_as(builtin::large_uint_imm())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     uint64_t low = static_cast<uint64_t>(Downcast<IntImm>(op->args[0])->value);
     uint64_t high = static_cast<uint64_t>(Downcast<IntImm>(op->args[1])->value);
     uint64_t val = (high << 32U) | low;
@@ -296,7 +296,7 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const CallNode* op) {
   } else if (op->op.same_as(builtin::tvm_storage_sync())) {
     return this->CreateStorageSync(op);
   } else if (op->op.same_as(builtin::if_then_else())) {
-    CHECK_EQ(op->args.size(), 3U);
+    ICHECK_EQ(op->args.size(), 3U);
     spirv::Value cond = MakeValue(op->args[0]);
     spirv::Label then_label = builder_->NewLabel();
     spirv::Label else_label = builder_->NewLabel();
@@ -352,9 +352,9 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const BroadcastNode* op) {
 }
 spirv::Value CodeGenSPIRV::VisitExpr_(const LoadNode* op) {
-  CHECK(is_one(op->predicate));
+  ICHECK(is_one(op->predicate));
   auto it = storage_info_.find(op->buffer_var.get());
-  CHECK(it != storage_info_.end());
+  ICHECK(it != storage_info_.end());
   StorageInfo& info = it->second;
   if (!info.content_fixed) {
     info.UpdateContentType(op->dtype);
@@ -369,7 +369,7 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const LoadNode* op) {
     mask |= spv::MemoryAccessVolatileMask;
   }
   if (op->dtype.lanes() == 1) {
-    CHECK_EQ(info.content_type, op->dtype)
+    ICHECK_EQ(info.content_type, op->dtype)
         << "Vulkan only allow one type access to the same buffer";
     spirv::Value index = MakeValue(op->index);
     spirv::Value ptr = builder_->StructArrayAccess(ptr_type, buffer, index);
@@ -387,9 +387,9 @@ spirv::Value CodeGenSPIRV::VisitExpr_(const LoadNode* op) {
   } else {
     if (const RampNode* ramp = op->index.as<RampNode>()) {
       if (is_one(ramp->stride)) {
-        CHECK_EQ(ramp->lanes, op->dtype.lanes());
+        ICHECK_EQ(ramp->lanes, op->dtype.lanes());
         arith::ModularSet me = analyzer_->modular_set(ramp->base);
-        CHECK((me->coeff % ramp->lanes) == 0 && (me->base % ramp->lanes) == 0)
+        ICHECK((me->coeff % ramp->lanes) == 0 && (me->base % ramp->lanes) == 0)
            << "Only aligned vector access is allowed in SPIRV";
         PrimExpr vec_index =
             analyzer_->Simplify(ramp->base / make_const(ramp->base.dtype(), ramp->lanes));
@@ -420,9 +420,9 @@ void CodeGenSPIRV::Scalarize(const PrimExpr& e, std::function<void(int, spirv::
 void CodeGenSPIRV::VisitStmt_(const StoreNode* op) {
-  CHECK(is_one(op->predicate));
+  ICHECK(is_one(op->predicate));
   auto it = storage_info_.find(op->buffer_var.get());
-  CHECK(it != storage_info_.end());
+  ICHECK(it != storage_info_.end());
   StorageInfo& info = it->second;
   if (!info.content_fixed) {
@@ -440,7 +440,7 @@ void CodeGenSPIRV::VisitStmt_(const StoreNode* op) {
   }
   if (op->value.dtype().lanes() == 1) {
-    CHECK_EQ(info.content_type, op->value.dtype())
+    ICHECK_EQ(info.content_type, op->value.dtype())
         << "Vulkan only allow one type access to the same buffer";
     spirv::Value index = MakeValue(op->index);
     spirv::Value ptr = builder_->StructArrayAccess(ptr_type, buffer, index);
@@ -457,9 +457,9 @@ void CodeGenSPIRV::VisitStmt_(const StoreNode* op) {
   } else {
     if (const RampNode* ramp = op->index.as<RampNode>()) {
       if (is_one(ramp->stride)) {
-        CHECK_EQ(ramp->lanes, op->value.dtype().lanes());
+        ICHECK_EQ(ramp->lanes, op->value.dtype().lanes());
         arith::ModularSet me = analyzer_->modular_set(ramp->base);
-        CHECK((me->coeff % ramp->lanes) == 0 && (me->base % ramp->lanes) == 0)
+        ICHECK((me->coeff % ramp->lanes) == 0 && (me->base % ramp->lanes) == 0)
            << "Only aligned vector access is allowed in SPIRV";
         PrimExpr vec_index =
             analyzer_->Simplify(ramp->base / make_const(ramp->base.dtype(), ramp->lanes));
@@ -474,7 +474,7 @@ void CodeGenSPIRV::VisitStmt_(const StoreNode* op) {
 }
 void CodeGenSPIRV::VisitStmt_(const ForNode* op) {
-  CHECK(is_zero(op->min));
+  ICHECK(is_zero(op->min));
   analyzer_->Bind(op->loop_var, Range::FromMinExtent(op->min, op->extent));
   spirv::Value init_value = MakeValue(op->min);
   spirv::Value extent_value = MakeValue(op->extent);
@@ -544,10 +544,10 @@ void CodeGenSPIRV::VisitStmt_(const IfThenElseNode* op) {
 }
 void CodeGenSPIRV::VisitStmt_(const AllocateNode* op) {
-  CHECK(!is_zero(op->condition));
-  CHECK(!op->dtype.is_handle());
+  ICHECK(!is_zero(op->condition));
+  ICHECK(!op->dtype.is_handle());
   int32_t constant_size = op->constant_allocation_size();
-  CHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU";
+  ICHECK_GT(constant_size, 0) << "Can only handle constant size stack allocation in GPU";
   spirv::Value buf;
   StorageInfo& info = storage_info_[op->buffer_var.get()];
   spirv::SType etype = builder_->GetSType(op->dtype);
@@ -556,15 +556,15 @@ void CodeGenSPIRV::VisitStmt_(const AllocateNode* op) {
         builder_->Allocate(etype, static_cast(constant_size), spv::StorageClassFunction);
   } else {
     // shared memory
-    CHECK(info.scope.rank == runtime::StorageRank::kShared)
+    ICHECK(info.scope.rank == runtime::StorageRank::kShared)
         << "Can only allocate shared or local memory inside kernel";
     // Shared memory
     buf = builder_->Allocate(etype, static_cast(constant_size), spv::StorageClassWorkgroup);
   }
-  CHECK(!info.content_fixed);
+  ICHECK(!info.content_fixed);
   info.UpdateContentType(op->dtype);
-  CHECK(!var_map_.count(op->buffer_var.get()));
+  ICHECK(!var_map_.count(op->buffer_var.get()));
   var_map_[op->buffer_var.get()] = buf;
   this->VisitStmt(op->body);
 }
@@ -580,11 +580,11 @@ void CodeGenSPIRV::VisitStmt_(const AttrStmtNode* op) {
     }
   } else if (op->attr_key == tir::attr::storage_scope) {
     const VarNode* v = op->node.as<VarNode>();
-    CHECK(v);
+    ICHECK(v);
     storage_info_[v].scope = runtime::StorageScope::Create(op->value.as<StringImmNode>()->value);
   } else if (op->attr_key == tir::attr::volatile_scope) {
     const VarNode* v = op->node.as<VarNode>();
-    CHECK(v);
+    ICHECK(v);
     storage_info_[v].is_volatile = true;
   }
   this->VisitStmt(op->body);
@@ -596,8 +596,8 @@ void CodeGenSPIRV::VisitStmt_(const AssertStmtNode* op) {
 }
 void CodeGenSPIRV::VisitStmt_(const LetStmtNode* op) {
-  CHECK(!var_map_.count(op->var.get()));
-  CHECK(!op->var.dtype().is_handle());
+  ICHECK(!var_map_.count(op->var.get()));
+  ICHECK(!op->var.dtype().is_handle());
   var_map_[op->var.get()] = MakeValue(op->value);
   analyzer_->Bind(op->var, op->value);
   this->VisitStmt(op->body);
diff --git a/src/target/spirv/codegen_spirv.h b/src/target/spirv/codegen_spirv.h
index 9bf81095f066..be755641c8a5 100644
--- a/src/target/spirv/codegen_spirv.h
+++ b/src/target/spirv/codegen_spirv.h
@@ -116,7 +116,7 @@ class CodeGenSPIRV : public ExprFunctor,
     // Update content type if it hasn't been updated.
     void UpdateContentType(DataType type) {
       if (content_fixed) {
-        CHECK_EQ(type, content_type) << "Cannot use two different content type in GLSL model";
+        ICHECK_EQ(type, content_type) << "Cannot use two different content type in GLSL model";
       } else {
         this->content_type = type;
         content_fixed = true;
diff --git a/src/target/spirv/intrin_rule_spirv.cc b/src/target/spirv/intrin_rule_spirv.cc
index ea575ca83866..90b2eb2a671f 100644
--- a/src/target/spirv/intrin_rule_spirv.cc
+++ b/src/target/spirv/intrin_rule_spirv.cc
@@ -36,7 +36,7 @@ template
 inline void DispatchGLSLPureIntrin(const TVMArgs& targs, TVMRetValue* rv) {
   PrimExpr e = targs[0];
   const tir::CallNode* call = e.as<tir::CallNode>();
-  CHECK(call != nullptr);
+  ICHECK(call != nullptr);
   Array<PrimExpr> cargs;
   // intrin id.
   cargs.push_back(IntImm(DataType::UInt(32), id));
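A recurring call-site shape in the intrinsic dispatchers above is: cast the packed argument to a call node, assert the expected operand count, then emit the target-specific call. The sketch below reproduces that shape with invented stand-in types — this `CallNode` is a plain struct and `MY_ICHECK` a one-line abort macro, so none of it is TVM's API; only the validate-then-lower pattern mirrors the hunks above.

// arity_check.cc -- sketch of the arity-validation pattern (invented types).
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

#define MY_ICHECK(cond) \
  (void)((cond) || (std::fprintf(stderr, "InternalError: Check failed: %s\n", #cond), std::abort(), 0))

struct CallNode {
  std::string op;         // e.g. "bitwise_and"
  std::vector<int> args;  // operand ids, stand-ins for PrimExpr operands
};

// Each builtin expects a fixed operand count. This is an internal invariant:
// the lowering passes, not the user, construct these calls, so a violation
// means a compiler bug rather than a bad user program.
int EvalBuiltin(const CallNode& call) {
  if (call.op == "bitwise_and") {
    MY_ICHECK(call.args.size() == 2);
    return call.args[0] & call.args[1];
  }
  if (call.op == "bitwise_not") {
    MY_ICHECK(call.args.size() == 1);
    return ~call.args[0];
  }
  std::fprintf(stderr, "unknown builtin: %s\n", call.op.c_str());
  return 0;
}

int main() {
  CallNode c{"bitwise_and", {6, 3}};
  std::printf("%d\n", EvalBuiltin(c));  // prints 2
}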
diff --git a/src/target/spirv/ir_builder.cc b/src/target/spirv/ir_builder.cc
index 305464ac398b..273fc48c3e30 100644
--- a/src/target/spirv/ir_builder.cc
+++ b/src/target/spirv/ir_builder.cc
@@ -30,7 +30,7 @@ namespace spirv {
 // implementations
 void IRBuilder::InitHeader() {
-  CHECK_EQ(header_.size(), 0U);
+  ICHECK_EQ(header_.size(), 0U);
   header_.push_back(spv::MagicNumber);
   // Use the spirv version as indicated in the SDK.
@@ -93,7 +93,7 @@ SType IRBuilder::GetSType(const DataType& dtype) {
 }
 SType IRBuilder::GetPointerType(const SType& value_type, spv::StorageClass storage_class) {
-  CHECK_NE(storage_class, spv::StorageClassMax);
+  ICHECK_NE(storage_class, spv::StorageClassMax);
   auto key = std::make_pair(value_type.id, storage_class);
   auto it = pointer_type_tbl_.find(key);
   if (it != pointer_type_tbl_.end()) {
@@ -128,7 +128,7 @@ SType IRBuilder::GetStructArrayType(const SType& value_type, uint32_t num_elems)
     ib_.Begin(spv::OpTypeRuntimeArray).AddSeq(arr_type, value_type).Commit(&global_);
   }
   int nbits = value_type.type.bits() * value_type.type.lanes();
-  CHECK_EQ(nbits % 8, 0);
+  ICHECK_EQ(nbits % 8, 0);
   uint32_t nbytes = static_cast(nbits) / 8;
   // decorate the array type.
   this->Decorate(spv::OpDecorate, arr_type, spv::DecorationArrayStride, nbytes);
@@ -158,7 +158,7 @@ SType IRBuilder::GetStructArrayType(const SType& value_type, uint32_t num_elems)
 }
 Value IRBuilder::StructArrayAccess(const SType& res_type, Value buffer, Value index) {
-  CHECK(buffer.flag == kStructArrayPtr);
+  ICHECK(buffer.flag == kStructArrayPtr);
   return MakeValue(spv::OpInBoundsAccessChain, res_type, buffer, const_i32_zero_, index);
 }
@@ -177,7 +177,7 @@ Value IRBuilder::FloatImm(const SType& dtype, double value) {
     uint64_t data = ptr[0];
     return GetConst_(dtype, &data);
   } else {
-    CHECK_EQ(dtype.type.bits(), 16);
+    ICHECK_EQ(dtype.type.bits(), 16);
     return Cast(dtype, FloatImm(GetSType(DataType::Float(32)), value));
   }
 }
@@ -204,7 +204,7 @@ Value IRBuilder::BufferArgument(const SType& value_type, uint32_t descriptor_set
 }
 Value IRBuilder::DeclarePushConstant(const std::vector& value_types) {
-  CHECK_EQ(push_const_.id, 0);
+  ICHECK_EQ(push_const_.id, 0);
   SType struct_type;
   struct_type.id = id_counter_++;
   struct_type.type = DataType::Handle();
@@ -221,7 +221,7 @@ Value IRBuilder::DeclarePushConstant(const std::vector& value_types) {
         .Commit(&decorate_);
     DataType t = value_types[i].type;
     uint32_t nbits = t.bits() * t.lanes();
-    CHECK_EQ(nbits % 8, 0);
+    ICHECK_EQ(nbits % 8, 0);
     offset += nbits / 8;
   }
   // Decorate push constants as UBO
@@ -243,7 +243,7 @@ Value IRBuilder::GetPushConstant(Value ptr_push_const, const SType& v_type, uint
 Value IRBuilder::NewFunction() { return NewValue(t_void_func_, kFunction); }
 void IRBuilder::CommitKernelFunction(const Value& func, const std::string& name) {
-  CHECK_EQ(func.flag, kFunction);
+  ICHECK_EQ(func.flag, kFunction);
   ib_.Begin(spv::OpEntryPoint).AddSeq(spv::ExecutionModelGLCompute, func, name);
   if (workgroup_id_.id != 0) {
     ib_.Add(workgroup_id_);
@@ -255,7 +255,7 @@ void IRBuilder::CommitKernelFunction(const Value& func, const std::string& name)
 }
 void IRBuilder::StartFunction(const Value& func) {
-  CHECK_EQ(func.flag, kFunction);
+  ICHECK_EQ(func.flag, kFunction);
   // add function declaration to the header.
   ib_.Begin(spv::OpFunction).AddSeq(t_void_, func, 0, t_void_func_).Commit(&func_header_);
@@ -265,7 +265,7 @@ void IRBuilder::StartFunction(const Value& func) {
 }
 void IRBuilder::SetLocalSize(const Value& func, uint32_t local_size[3]) {
-  CHECK_EQ(func.flag, kFunction);
+  ICHECK_EQ(func.flag, kFunction);
   ib_.Begin(spv::OpExecutionMode)
       .AddSeq(func, spv::ExecutionModeLocalSize, local_size[0], local_size[1], local_size[2])
       .Commit(&exec_mode_);
@@ -273,7 +273,7 @@ void IRBuilder::SetLocalSize(const Value& func, uint32_t local_size[3]) {
 Value IRBuilder::Allocate(const SType& value_type, uint32_t num_elems,
                           spv::StorageClass storage_class) {
-  CHECK_NE(num_elems, 0U);
+  ICHECK_NE(num_elems, 0U);
   SType sarr_type = GetStructArrayType(value_type, num_elems);
   SType ptr_type = GetPointerType(sarr_type, storage_class);
   Value val = NewValue(ptr_type, kStructArrayPtr);
@@ -322,7 +322,7 @@ Value IRBuilder::GetConst_(const SType& dtype, const uint64_t* pvalue) {
   if (it != const_tbl_.end()) {
     return it->second;
   }
-  CHECK_LE(dtype.type.bits(), 64);
+  ICHECK_LE(dtype.type.bits(), 64);
   Value ret = NewValue(dtype, kConstant);
   if (dtype.type == DataType::UInt(1)) {
     // bool types.
@@ -357,7 +357,7 @@ SType IRBuilder::DeclareType(const DataType& dtype) {
   t.id = id_counter_++;
   t.type = dtype;
   if (dtype.bits() == 1) {
-    CHECK(dtype.is_uint());
+    ICHECK(dtype.is_uint());
     ib_.Begin(spv::OpTypeBool).Add(t).Commit(&global_);
   } else if (dtype.is_int()) {
     ib_.Begin(spv::OpTypeInt).AddSeq(t, dtype.bits(), 1).Commit(&global_);
@@ -390,7 +390,7 @@ PhiValue IRBuilder::MakePhi(const SType& out_type, uint32_t num_incoming) {
   phi.stype = out_type;
   phi.flag = kNormal;
   phi.instr = ib_.Commit(&function_);
-  CHECK_EQ(phi.instr.WordCount(), 2 * num_incoming + 3);
+  ICHECK_EQ(phi.instr.WordCount(), 2 * num_incoming + 3);
   return phi;
 }
@@ -410,7 +410,7 @@ Value IRBuilder::Concat(const std::vector& vec) {
   DataType etype = vec[0].stype.type;
   int lanes = etype.lanes();
   for (size_t i = 1; i < vec.size(); ++i) {
-    CHECK_EQ(etype, vec[i].stype.type.element_of())
+    ICHECK_EQ(etype, vec[i].stype.type.element_of())
         << "Cannot concat vector of different element type";
     lanes += vec[i].stype.type.lanes();
     is_const = is_const && (vec[i].flag == kConstant);
@@ -435,11 +435,11 @@ Value IRBuilder::Concat(const std::vector& vec) {
 }
 Value IRBuilder::Cast(const SType& dst_type, spirv::Value value) {
-  CHECK_NE(value.stype.id, 0U);
+  ICHECK_NE(value.stype.id, 0U);
   if (value.stype.id == dst_type.id) return value;
   const tvm::DataType& from = value.stype.type;
   const tvm::DataType& to = dst_type.type;
-  CHECK_EQ(from.lanes(), to.lanes());
+  ICHECK_EQ(from.lanes(), to.lanes());
   if (from == DataType::Bool()) {
     if (to.is_int()) {
       return Select(value, IntImm(dst_type, 1), IntImm(dst_type, 0));
@@ -493,24 +493,24 @@ Value IRBuilder::Cast(const SType& dst_type, spirv::Value value) {
 #define DEFINE_BUILDER_BINARY_USIGN_OP(_OpName, _Op)         \
   Value IRBuilder::_OpName(Value a, Value b) {               \
-    CHECK_EQ(a.stype.id, b.stype.id);                        \
+    ICHECK_EQ(a.stype.id, b.stype.id);                       \
     if (a.stype.type.is_int() || a.stype.type.is_uint()) {   \
       return MakeValue(spv::OpI##_Op, a.stype, a, b);        \
     } else {                                                 \
-      CHECK(a.stype.type.is_float());                        \
+      ICHECK(a.stype.type.is_float());                       \
       return MakeValue(spv::OpF##_Op, a.stype, a, b);        \
     }                                                        \
   }
 #define DEFINE_BUILDER_BINARY_SIGN_OP(_OpName, _Op)          \
   Value IRBuilder::_OpName(Value a, Value b) {               \
-    CHECK_EQ(a.stype.id, b.stype.id);                        \
+    ICHECK_EQ(a.stype.id, b.stype.id);                       \
     if (a.stype.type.is_int()) {                             \
       return MakeValue(spv::OpS##_Op, a.stype, a, b);        \
    } else if (a.stype.type.is_uint()) {                     \
      return MakeValue(spv::OpU##_Op, a.stype, a, b);        \
    } else {                                                 \
-      CHECK(a.stype.type.is_float());                       \
+      ICHECK(a.stype.type.is_float());                      \
      return MakeValue(spv::OpF##_Op, a.stype, a, b);        \
    }                                                        \
  }
@@ -521,28 +521,28 @@ DEFINE_BUILDER_BINARY_USIGN_OP(Mul, Mul);
 DEFINE_BUILDER_BINARY_SIGN_OP(Div, Div);
 Value IRBuilder::Mod(Value a, Value b) {
-  CHECK_EQ(a.stype.id, b.stype.id);
+  ICHECK_EQ(a.stype.id, b.stype.id);
   if (a.stype.type.is_int()) {
     return MakeValue(spv::OpSRem, a.stype, a, b);
   } else if (a.stype.type.is_uint()) {
     return MakeValue(spv::OpUMod, a.stype, a, b);
   } else {
-    CHECK(a.stype.type.is_float());
+    ICHECK(a.stype.type.is_float());
    return MakeValue(spv::OpFRem, a.stype, a, b);
   }
 }
 #define DEFINE_BUILDER_CMP_OP(_OpName, _Op)                                                     \
   Value IRBuilder::_OpName(Value a, Value b) {                                                  \
-    CHECK_EQ(a.stype.id, b.stype.id);                                                           \
-    CHECK_EQ(a.stype.type.lanes(), b.stype.type.lanes());                                       \
+    ICHECK_EQ(a.stype.id, b.stype.id);                                                          \
+    ICHECK_EQ(a.stype.type.lanes(), b.stype.type.lanes());                                      \
     const auto& bool_type = this->GetSType(DataType::UInt(1).with_lanes(a.stype.type.lanes())); \
     if (a.stype.type.is_int()) {                                                                \
       return MakeValue(spv::OpS##_Op, bool_type, a, b);                                         \
     } else if (a.stype.type.is_uint()) {                                                        \
       return MakeValue(spv::OpU##_Op, bool_type, a, b);                                         \
     } else {                                                                                    \
-      CHECK(a.stype.type.is_float());                                                           \
+      ICHECK(a.stype.type.is_float());                                                          \
       return MakeValue(spv::OpFOrd##_Op, bool_type, a, b);                                      \
     }                                                                                           \
   }
@@ -554,13 +554,13 @@ DEFINE_BUILDER_CMP_OP(GE, GreaterThanEqual);
 #define DEFINE_BUILDER_CMP_UOP(_OpName, _Op)                                                    \
   Value IRBuilder::_OpName(Value a, Value b) {                                                  \
-    CHECK_EQ(a.stype.id, b.stype.id);                                                           \
-    CHECK_EQ(a.stype.type.lanes(), b.stype.type.lanes());                                       \
+    ICHECK_EQ(a.stype.id, b.stype.id);                                                          \
+    ICHECK_EQ(a.stype.type.lanes(), b.stype.type.lanes());                                      \
     const auto& bool_type = this->GetSType(DataType::UInt(1).with_lanes(a.stype.type.lanes())); \
     if (a.stype.type.is_int() || a.stype.type.is_uint()) {                                      \
       return MakeValue(spv::OpI##_Op, bool_type, a, b);                                         \
     } else {                                                                                    \
-      CHECK(a.stype.type.is_float());                                                           \
+      ICHECK(a.stype.type.is_float());                                                          \
       return MakeValue(spv::OpFOrd##_Op, bool_type, a, b);                                      \
     }                                                                                           \
   }
@@ -569,8 +569,8 @@ DEFINE_BUILDER_CMP_UOP(EQ, Equal);
 DEFINE_BUILDER_CMP_UOP(NE, NotEqual);
 Value IRBuilder::Select(Value cond, Value a, Value b) {
-  CHECK_EQ(a.stype.id, b.stype.id);
-  CHECK_EQ(cond.stype.type.element_of(), DataType::UInt(1));
+  ICHECK_EQ(a.stype.id, b.stype.id);
+  ICHECK_EQ(cond.stype.type.element_of(), DataType::UInt(1));
   return MakeValue(spv::OpSelect, a.stype, cond, a, b);
 }
diff --git a/src/target/spirv/ir_builder.h b/src/target/spirv/ir_builder.h
index c52f92fd7c20..8a08048e1955 100644
--- a/src/target/spirv/ir_builder.h
+++ b/src/target/spirv/ir_builder.h
@@ -93,7 +93,7 @@ class Instr {
   * \return reference to idx-th word.
   */
  uint32_t& operator[](uint32_t idx) {
-    CHECK_LT(idx, word_count_);
+    ICHECK_LT(idx, word_count_);
    return (*data_)[begin_ + idx];
  }
@@ -122,7 +122,7 @@ struct PhiValue : public Value {
   * \param parent The parent label.
   */
  void SetIncoming(uint32_t index, const Value& value, const Label& parent) {
-    CHECK_EQ(this->stype.id, value.stype.id);
+    ICHECK_EQ(this->stype.id, value.stype.id);
    instr[3 + index * 2] = value.id;
    instr[3 + index * 2 + 1] = parent.id;
  }
@@ -152,7 +152,7 @@ class InstrBuilder {
   */
  InstrBuilder& Begin(spv::Op op) {  // NOLINT(*);
    // finish previous build
-    CHECK_EQ(data_.size(), 0U);
+    ICHECK_EQ(data_.size(), 0U);
    op_ = op;
    data_.push_back(0);
    return *this;
diff --git a/src/target/stackvm/codegen_stackvm.cc b/src/target/stackvm/codegen_stackvm.cc
index ac3ba78fa4d5..0dd96e07ed96 100644
--- a/src/target/stackvm/codegen_stackvm.cc
+++ b/src/target/stackvm/codegen_stackvm.cc
@@ -75,12 +75,12 @@ StackVM::StructFieldKind MapFieldKind(int64_t kind) {
 }
 StackVM CodeGenStackVM::Compile(const PrimFunc& f) {
-  CHECK_EQ(f->buffer_map.size(), 0U)
+  ICHECK_EQ(f->buffer_map.size(), 0U)
      << "Cannot codegen function with buffer_map, please lower them first";
   for (size_t i = 0; i < f->params.size(); ++i) {
     Var v = f->params[i];
     int vid = AllocVarID(v.get());
-    CHECK_EQ(static_cast(vid), i);
+    ICHECK_EQ(static_cast(vid), i);
   }
   this->Push(f->body);
   vm_.InitCache();
@@ -101,7 +101,7 @@ void CodeGenStackVM::PushOp(StackVM::OpCode opcode) {
 }
 void CodeGenStackVM::SetOperand(int64_t operand_index, int64_t operand) {
-  CHECK(operand >= std::numeric_limits::min() && operand <= std::numeric_limits::max());
+  ICHECK(operand >= std::numeric_limits::min() && operand <= std::numeric_limits::max());
   vm_.code.at(operand_index).v_int = static_cast(operand);
 }
@@ -125,9 +125,9 @@ int CodeGenStackVM::GetStrID(const std::string& key) {
 }
 int CodeGenStackVM::AllocVarID(const VarNode* v) {
-  CHECK(!var_idmap_.count(v));
+  ICHECK(!var_idmap_.count(v));
   int vid = static_cast(vm_.heap_size);
-  CHECK_EQ(vm_.heap_size, var_idmap_.size());
+  ICHECK_EQ(vm_.heap_size, var_idmap_.size());
   vm_.heap_id_name.push_back(v->name_hint);
   ++vm_.heap_size;
   var_idmap_[v] = vid;
@@ -136,7 +136,7 @@ int CodeGenStackVM::AllocVarID(const VarNode* v) {
 int CodeGenStackVM::GetVarID(const VarNode* v) const {
   auto it = var_idmap_.find(v);
-  CHECK(it != var_idmap_.end()) << "Find undefined Variable " << v->name_hint;
+  ICHECK(it != var_idmap_.end()) << "Find undefined Variable " << v->name_hint;
   return it->second;
 }
@@ -177,7 +177,7 @@ void CodeGenStackVM::VisitStmt_(const AllocateNode* op) {
 void CodeGenStackVM::VisitExpr_(const CallNode* op) {
   if (op->op.same_as(builtin::address_of())) {
     const LoadNode* l = op->args[0].as<LoadNode>();
-    CHECK(op->args.size() == 1 && l);
+    ICHECK(op->args.size() == 1 && l);
     this->PushOp(StackVM::LOAD_HEAP, GetVarID(l->buffer_var.get()));
     this->Push(l->index);
     this->PushOp(StackVM::PUSH_I64, l->dtype.element_of().bytes());
@@ -186,11 +186,11 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
   } else if (op->op.same_as(builtin::reinterpret())) {
     this->Push(op->args[0]);
   } else if (op->op.same_as(builtin::tvm_struct_get())) {
-    CHECK_EQ(op->args.size(), 3U);
+    ICHECK_EQ(op->args.size(), 3U);
     int kind = op->args[2].as<IntImmNode>()->value;
     this->Push(op->args[0]);
     const IntImmNode* index = op->args[1].as<IntImmNode>();
-    CHECK(index != nullptr);
+    ICHECK(index != nullptr);
     StackVM::Code code;
     code.op_code = StackVM::TVM_STRUCT_GET;
     vm_.code.push_back(code);
@@ -199,9 +199,9 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
     code.v_int = MapFieldKind(kind);
     vm_.code.push_back(code);
   } else if (op->op.same_as(builtin::tvm_call_packed_lowered())) {
-    CHECK_GE(op->args.size(), 5U);
+    ICHECK_GE(op->args.size(), 5U);
     const StringImmNode* s = op->args[0].as<StringImmNode>();
-    CHECK(s != nullptr) << "tvm_call_global expect first argument as function name";
+    ICHECK(s != nullptr) << "tvm_call_global expect first argument as function name";
     this->Push(op->args[1]);
     this->Push(op->args[2]);
     int begin = op->args[3].as<IntImmNode>()->value;
@@ -228,10 +228,10 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
     code.v_int = end;
     vm_.code.push_back(code);
   } else if (op->op.same_as(builtin::tvm_stack_alloca())) {
-    CHECK_EQ(op->args.size(), 2U);
+    ICHECK_EQ(op->args.size(), 2U);
     const std::string& type = op->args[0].as<StringImmNode>()->value;
     const IntImmNode* num = op->args[1].as<IntImmNode>();
-    CHECK(num != nullptr);
+    ICHECK(num != nullptr);
     static_assert(alignof(TVMValue) % alignof(DLTensor) == 0, "invariant");
     // static_assert(alignof(TVMValue) % alignof(tvm_index_t) == 0, "invariant");
     size_t unit = sizeof(TVMValue);
@@ -251,7 +251,7 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
     vm_.stack_size += size;
     this->PushOp(StackVM::TVM_STACK_ALLOCA_BY_8BYTE, static_cast(size));
   } else if (op->op.same_as(backend_alloc_workspace_op_)) {
-    CHECK_EQ(op->args.size(), 5U);
+    ICHECK_EQ(op->args.size(), 5U);
     this->Push(op->args[0]);
     this->Push(op->args[1]);
     this->Push(op->args[2]);
@@ -259,7 +259,7 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
     this->Push(op->args[4]);
     this->PushOp(StackVM::TVM_DEVICE_ALLOCA);
   } else if (op->op.same_as(backend_free_workspace_op_)) {
-    CHECK_EQ(op->args.size(), 3U);
+    ICHECK_EQ(op->args.size(), 3U);
     this->Push(op->args[0]);
     this->Push(op->args[1]);
     this->Push(op->args[2]);
@@ -267,7 +267,7 @@ void CodeGenStackVM::VisitExpr_(const CallNode* op) {
   } else if (op->op.same_as(builtin::tvm_throw_last_error())) {
     this->PushOp(StackVM::TVM_THROW_LAST_ERROR);
   } else if (op->op.same_as(builtin::isnullptr())) {
-    CHECK_EQ(op->args.size(), 1U);
+    ICHECK_EQ(op->args.size(), 1U);
     this->Push(op->args[0]);
     this->PushOp(StackVM::PUSH_I64, 0);
     this->PushOp(StackVM::EQ_HANDLE);
@@ -305,8 +305,8 @@ void CodeGenStackVM::VisitExpr_(const StringImmNode* op) {
 }
 void CodeGenStackVM::VisitExpr_(const IntImmNode* op) {
-  CHECK(op->value >= std::numeric_limits::min() &&
-        op->value <= std::numeric_limits::max())
+  ICHECK(op->value >= std::numeric_limits::min() &&
+         op->value <= std::numeric_limits::max())
      << "Int constant exceed bound";
   this->PushOp(StackVM::PUSH_I64, static_cast(op->value));
 }
@@ -399,7 +399,7 @@ void CodeGenStackVM::VisitExpr_(const NotNode* op) {
 }
 void CodeGenStackVM::VisitStmt_(const ForNode* op) {
-  CHECK(is_zero(op->min));
+  ICHECK(is_zero(op->min));
   int vid = this->AllocVarID(op->loop_var.get());
   this->PushOp(StackVM::PUSH_I64, 0);
   int64_t loop_head = this->GetPC();
@@ -432,11 +432,11 @@ void CodeGenStackVM::VisitStmt_(const EvaluateNode* ev) {
   if (is_const_int(ev->value)) return;
   const CallNode* op = ev->value.as<CallNode>();
   if (op && op->op.same_as(builtin::tvm_struct_set())) {
-    CHECK_EQ(op->args.size(), 4U);
+    ICHECK_EQ(op->args.size(), 4U);
     this->Push(op->args[0]);
     this->Push(op->args[3]);
     const IntImmNode* index = op->args[1].as<IntImmNode>();
-    CHECK(index != nullptr);
+    ICHECK(index != nullptr);
     StackVM::Code code;
     code.op_code = StackVM::TVM_STRUCT_SET;
     vm_.code.push_back(code);
@@ -515,14 +515,14 @@ runtime::Module BuildStackVM(IRModule mod, Target target) {
   std::string entry_func;
   for (auto kv : mod->functions) {
-    CHECK(kv.second->IsInstance<PrimFuncNode>()) << "CodeGenStackVM: Can only take PrimFunc";
+    ICHECK(kv.second->IsInstance<PrimFuncNode>()) << "CodeGenStackVM: Can only take PrimFunc";
     auto f = Downcast<PrimFunc>(kv.second);
     auto global_symbol = f->GetAttr(tvm::attr::kGlobalSymbol);
-    CHECK(global_symbol.defined())
+    ICHECK(global_symbol.defined())
        << "CodeGenStackVM: Expect PrimFunc to have the global_symbol attribute";
     std::string f_name = global_symbol.value();
     StackVM vm = codegen::CodeGenStackVM().Compile(f);
-    CHECK(!fmap.count(f_name)) << "Function name " << f_name << "already exist in list";
+    ICHECK(!fmap.count(f_name)) << "Function name " << f_name << "already exist in list";
     fmap[f_name] = std::move(vm);
     if (f->HasNonzeroAttr(tir::attr::kIsEntryFunc)) {
diff --git a/src/target/tag.cc b/src/target/tag.cc
index 3e47e456691a..8198435a9494 100644
--- a/src/target/tag.cc
+++ b/src/target/tag.cc
@@ -60,7 +60,7 @@ Map TargetTag::ListTags() {
 Target TargetTag::AddTag(String name, Map config, bool override) {
   TargetTagRegEntry& tag = TargetTagRegEntry::RegisterOrGet(name).set_name();
-  CHECK(override || tag.tag_->config.empty())
+  ICHECK(override || tag.tag_->config.empty())
      << "Tag \"" << name << "\" has been previously defined as: " << tag.tag_->config;
   tag.set_config(config);
   return Target(config);
diff --git a/src/target/target.cc b/src/target/target.cc
index 052824249392..e44a15c3ff59 100644
--- a/src/target/target.cc
+++ b/src/target/target.cc
@@ -421,8 +421,8 @@ void Target::EnterWithScope() {
 void Target::ExitWithScope() {
   TVMTargetThreadLocalEntry* entry = TVMTargetThreadLocalStore::Get();
-  CHECK(!entry->context_stack.empty());
-  CHECK(entry->context_stack.top().same_as(*this));
+  ICHECK(!entry->context_stack.empty());
+  ICHECK(entry->context_stack.top().same_as(*this));
   entry->context_stack.pop();
 }
@@ -431,7 +431,7 @@ Target Target::Current(bool allow_not_defined) {
   if (entry->context_stack.size() > 0) {
     return entry->context_stack.top();
   }
-  CHECK(allow_not_defined)
+  ICHECK(allow_not_defined)
      << "Target context required. Please set it by constructing a TargetContext";
   return Target();
@@ -473,8 +473,8 @@ ObjectPtr TargetInternal::FromString(const String& tag_or_config_or_targ
 ObjectPtr TargetInternal::FromConfigString(const String& config_str) {
   const auto* loader = tvm::runtime::Registry::Get("target._load_config_dict");
-  CHECK(loader) << "AttributeError: \"target._load_config_dict\" is not registered. Please check "
-                   "if the python module is properly loaded";
+  ICHECK(loader) << "AttributeError: \"target._load_config_dict\" is not registered. Please check "
+                    "if the python module is properly loaded";
   Optional<Map<String, ObjectRef>> config = (*loader)(config_str);
   if (!config.defined()) {
     throw dmlc::Error(": Cannot load config dict with python JSON loader");
diff --git a/src/target/target_kind.cc b/src/target/target_kind.cc
index b5d2bf7ceb85..017ba396f861 100644
--- a/src/target/target_kind.cc
+++ b/src/target/target_kind.cc
@@ -122,7 +122,7 @@ void CheckOrSetAttr(Map* attrs, const String& name, const Str
     attrs->Set(name, value);
   } else {
     const auto* str = (*iter).second.as<StringObj>();
-    CHECK(str != nullptr && GetRef<String>(str) == value)
+    ICHECK(str != nullptr && GetRef<String>(str) == value)
        << "ValueError: Expects \"" << name << "\" to be \"" << value
        << "\", but gets: " << (*iter).second;
   }
@@ -143,7 +143,7 @@ Map UpdateNVPTXAttrs(Map attrs) {
     // If -mcpu has been specified, validate the correctness
     String mcpu = Downcast<String>(attrs.at("mcpu"));
     arch = ExtractIntWithPrefix(mcpu, "sm_");
-    CHECK(arch != -1) << "ValueError: NVPTX target gets an invalid CUDA arch: -mcpu=" << mcpu;
+    ICHECK(arch != -1) << "ValueError: NVPTX target gets an invalid CUDA arch: -mcpu=" << mcpu;
   } else {
     // Use the compute version of the first CUDA GPU instead
     TVMRetValue version;
@@ -170,7 +170,7 @@ Map UpdateROCmAttrs(Map attrs) {
   if (attrs.count("mcpu")) {
     String mcpu = Downcast<String>(attrs.at("mcpu"));
     arch = ExtractIntWithPrefix(mcpu, "gfx");
-    CHECK(arch != -1) << "ValueError: ROCm target gets an invalid GFX version: -mcpu=" << mcpu;
+    ICHECK(arch != -1) << "ValueError: ROCm target gets an invalid GFX version: -mcpu=" << mcpu;
   } else {
     TVMRetValue val;
     if (!DetectDeviceFlag({kDLROCM, 0}, runtime::kGcnArch, &val)) {
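The target_kind.cc hunks above keep their "ValueError:"-prefixed messages even though the macro is now ICHECK, because an invalid -mcpu string originates from user input and the message must stay meaningful to users. The sketch below reconstructs that validation logic from the call site alone; this ExtractIntWithPrefix is a guessed reimplementation for exposition, not TVM's helper, and the message text is illustrative.

// mcpu_check.cc -- sketch of the "-mcpu" validation pattern seen above.
#include <cstdio>
#include <cstdlib>
#include <string>

// Returns the integer following `prefix` in `str`, or -1 if absent/malformed.
int ExtractIntWithPrefix(const std::string& str, const std::string& prefix) {
  if (str.rfind(prefix, 0) != 0) return -1;  // must start with the prefix
  const std::string digits = str.substr(prefix.size());
  if (digits.empty()) return -1;
  for (char c : digits) {
    if (c < '0' || c > '9') return -1;
  }
  return std::atoi(digits.c_str());
}

int main() {
  // "sm_70" -> 70; anything else trips the ValueError-style message that the
  // patch keeps verbatim while swapping CHECK for ICHECK.
  for (const std::string mcpu : {"sm_70", "gfx906", "sm_"}) {
    int arch = ExtractIntWithPrefix(mcpu, "sm_");
    if (arch == -1) {
      std::fprintf(stderr, "ValueError: invalid CUDA arch: -mcpu=%s\n", mcpu.c_str());
    } else {
      std::printf("-mcpu=%s -> arch %d\n", mcpu.c_str(), arch);
    }
  }
  return 0;
}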
diff --git a/src/te/autodiff/ad_simplify.cc b/src/te/autodiff/ad_simplify.cc
index 81df5c9d6b42..cc0e82066171 100644
--- a/src/te/autodiff/ad_simplify.cc
+++ b/src/te/autodiff/ad_simplify.cc
@@ -97,8 +97,8 @@ Array IterVarsFromMap(const Array& vars, const Map& vr
                       IterVarType iter_type = kDataPar, std::string thread_tag = "") {
   Array res;
   for (const Var& v : vars) {
-    CHECK(vranges.count(v)) << "A range for the variable " << v << " was not provided in map "
-                            << vranges;
+    ICHECK(vranges.count(v)) << "A range for the variable " << v << " was not provided in map "
+                             << vranges;
     res.push_back(IterVar(vranges[v], v, iter_type, thread_tag));
   }
   return res;
@@ -478,7 +478,7 @@ class FactorOutAtomicFormulasFunctor
 // and a non-atomic residual. Atomic formulas are consts, calls, variables and comparisons (a <= b,
 // etc), i.e. formulas which are not logical operators (||, &&, !) on the top level.
 FactorOutAtomicFormulasResult FactorOutAtomicFormulas(const PrimExpr& e) {
-  CHECK(e.dtype().is_bool());
+  ICHECK(e.dtype().is_bool());
   return FactorOutAtomicFormulasFunctor().VisitExpr(e);
 }
@@ -494,7 +494,7 @@ inline PrimExpr ModImpl(PrimExpr a, PrimExpr b, DivMode mode) {
   if (mode == kTruncDiv) {
     return truncmod(a, b);
   } else {
-    CHECK_EQ(mode, kFloorDiv);
+    ICHECK_EQ(mode, kFloorDiv);
     return floormod(a, b);
   }
 }
@@ -503,7 +503,7 @@ inline PrimExpr DivImpl(PrimExpr a, PrimExpr b, DivMode mode) {
   if (mode == kTruncDiv) {
     return truncdiv(a, b);
   } else {
-    CHECK_EQ(mode, kFloorDiv);
+    ICHECK_EQ(mode, kFloorDiv);
     return floordiv(a, b);
   }
 }
@@ -817,7 +817,7 @@ PrimExpr SimplifyReductionDomain(const PrimExpr& expr, const Map& ou
 // Extract from cond an implication of cond not containing vars
 std::pair ImplicationNotContainingVars(
     const PrimExpr& cond, const std::unordered_set& vars) {
-  CHECK(cond.dtype().is_bool()) << "The type of cond must be bool";
+  ICHECK(cond.dtype().is_bool()) << "The type of cond must be bool";
   // TODO(sgrechanik-h): NOTs could be pushed down using De Morgan laws
   // before running this function but this case didn't seem to be important enough.
   if (const AndNode* op = cond.as<AndNode>()) {
@@ -938,7 +938,7 @@ class RemoveRedundantInequalitiesMutator : public ExprMutator {
   virtual PrimExpr VisitExpr_(const ReduceNode* op) {
     Array known_with_axes = known_;
-    CHECK(op->init.empty()) << "Derivative of Reduction with initialization is not implemented";
+    ICHECK(op->init.empty()) << "Derivative of Reduction with initialization is not implemented";
     for (const PrimExpr& axis_cond : IterVarsToInequalities(op->axis)) {
       known_with_axes.push_back(axis_cond);
     }
@@ -1011,7 +1011,7 @@ PrimExpr TrySimplifyCompute(const PrimExpr& expr, const PrimExpr& cond,
   Array used_res_variables;
   for (const Var& var : res->dst->variables) {
     if (ExprUseVar(new_expr, var)) {
-      CHECK(res->dst->ranges.count(var)) << "Range of " << var << " cannot be inferred.";
+      ICHECK(res->dst->ranges.count(var)) << "Range of " << var << " cannot be inferred.";
       used_res_variables.push_back(var);
     }
   }
@@ -1031,7 +1031,7 @@ PrimExpr TrySimplifyCompute(const PrimExpr& expr, const PrimExpr& cond,
   // Compute volumes before and after
   PrimExpr old_volume = make_const(DataType::Int(64), 1);
   for (const Var& var : outer_axis) {
-    CHECK(vranges.count(var)) << "Range of " << var << " was not provided.";
+    ICHECK(vranges.count(var)) << "Range of " << var << " was not provided.";
     old_volume = old_volume * vranges[var]->extent;
   }
@@ -1069,7 +1069,7 @@ class ReductionAsTensorAccessMutator : public ExprMutator {
     ReductionAsTensorAccessMutator new_mutator(Concat(IterVarsToVars(op->axis), outer_axis_),
                                                Merge(vranges_, IterVarsToMap(op->axis)), name_);
-    CHECK(op->init.empty()) << "Derivative of Reduction with initialization is not implemented";
+    ICHECK(op->init.empty()) << "Derivative of Reduction with initialization is not implemented";
     Array new_source;
     for (const PrimExpr& src : op->source) {
       new_source.push_back(new_mutator(src));
@@ -1152,7 +1152,7 @@ PrimExpr RemoveJacobianAndLiftNonzeroCondImpl(const PrimExpr& expr_orig, const A
   PrimExpr expr = analyzer.Simplify(expr_orig, kSimplifyRewriteCanonicalRewrite);
   if (const ReduceNode* red = expr.as<ReduceNode>()) {
-    CHECK(red->init.empty()) << "Derivative of Reduction with initialization is not implemented";
+    ICHECK(red->init.empty()) << "Derivative of Reduction with initialization is not implemented";
     // TODO(sgrechanik-h): There are some other operations which behave like sum
     bool is_sum = IsSumCombiner(red->combiner, vranges);
     if (is_sum || CanFactorZeroFromCombiner(red->combiner, red->value_index, vranges)) {
diff --git a/src/te/autodiff/jacobian.cc b/src/te/autodiff/jacobian.cc
index ba03ba08febd..7104424957af 100644
--- a/src/te/autodiff/jacobian.cc
+++ b/src/te/autodiff/jacobian.cc
@@ -82,7 +82,7 @@ class JacobianMutator : public ExprMutator {
     auto tensor = Downcast<Tensor>(op->producer);
     if (input_.get() && tensor == input_) {
       // Tensor(indices)
-      CHECK_EQ(indices_.size(), op->indices.size());
+      ICHECK_EQ(indices_.size(), op->indices.size());
       PrimExpr condition = const_true();
       for (size_t i = 0; i < input_.ndim(); ++i) {
         condition = And(condition, EQ(indices_[i], op->indices[i]));
@@ -181,7 +181,8 @@ class JacobianMutator : public ExprMutator {
       PrimExpr expr_with_new_axes = te::CloneReduction(GetRef(op));
       const ReduceNode* new_op = expr_with_new_axes.as<ReduceNode>();
-      CHECK(new_op->init.empty()) << "Derivative of Reduction with initialization is not implemented";
+      ICHECK(new_op->init.empty())
+          << "Derivative of Reduction with initialization is not implemented";
       // New lhs and rhs variables of the new combiner consist of
       // variables representing derivatives (which are later derived from new_op->source)
@@ -303,7 +304,7 @@ PrimExpr Jacobian(const PrimExpr& expr, const Tensor& input, const Array
   const ComputeOpNode* op = output->op.as<ComputeOpNode>();
-  CHECK(op) << "Derivative of this operation is not implemented: " << output->op;
+  ICHECK(op) << "Derivative of this operation is not implemented: " << output->op;
   bool is_input_tensor = false;
   for (const Tensor& child : op->InputTensors()) {
     if (input == child) {
@@ -311,8 +312,8 @@ Tensor Jacobian(const Tensor& output, const Tensor& input) {
       break;
     }
   }
-  CHECK(is_input_tensor) << "Jacobian is called on a pair of tensors such that the output "
-                         << "does not directly depend on the input.";
+  ICHECK(is_input_tensor) << "Jacobian is called on a pair of tensors such that the output "
+                          << "does not directly depend on the input.";
   // We have to clone the iteration axes because otherwise the original expression
   // cannot be used together with the derivative (it will lead to errors during lowering)
diff --git a/src/te/operation/compute_op.cc b/src/te/operation/compute_op.cc
index 64995761524b..3b225760d75d 100644
--- a/src/te/operation/compute_op.cc
+++ b/src/te/operation/compute_op.cc
@@ -74,12 +74,12 @@ Array BaseComputeOpNode::root_iter_vars() const {
 }
 DataType ComputeOpNode::output_dtype(size_t idx) const {
-  CHECK_LT(idx, num_outputs());
+  ICHECK_LT(idx, num_outputs());
   return body[idx].dtype();
 }
 Array BaseComputeOpNode::output_shape(size_t idx) const {
-  CHECK_LT(idx, num_outputs());
+  ICHECK_LT(idx, num_outputs());
   // for now, all outputs of a BaseComputeOp have the same shape
   Array shape;
   for (const auto& ivar : this->axis) {
@@ -170,7 +170,7 @@ Array ComputeOpNode::InputTensors() const {
 Operation ComputeOpNode::ReplaceInputs(const Operation& self,
                                        const std::unordered_map& rmap) const {
-  CHECK_EQ(self.operator->(), this);
+  ICHECK_EQ(self.operator->(), this);
   VerifyComputeOp(this);
   Array arr;
   if (this->body[0]->IsInstance()) {
@@ -202,7 +202,7 @@ Operation ComputeOpNode::ReplaceInputs(const Operation& self,
 void ComputeOpNode::PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer,
                                       const std::unordered_map& dom_map,
                                       std::unordered_map* out_dom_map) const {
-  CHECK_EQ(self.operator->(), this);
+  ICHECK_EQ(self.operator->(), this);
   auto fvisit = [&dom_map, out_dom_map, analyzer](const ObjectRef& n) {
     if (auto* pload = n.as()) {
       Tensor t = Downcast(pload->producer);
@@ -245,15 +245,15 @@ void ComputeOpNode::PropBoundToInputs(const Operation& self, arith::Analyzer* an
 void BaseComputeOpNode::GatherBound(const Operation& self,
                                     const std::unordered_map& tensor_dom,
                                     std::unordered_map* out_dom_map) const {
-  CHECK_EQ(self.operator->(), this);
+  ICHECK_EQ(self.operator->(), this);
   const TensorDom& tdom = tensor_dom.at(self.output(0));
   for (size_t i = 0; i < this->axis.size(); ++i) {
     Range r = arith::Union(tdom.data.at(i)).CoverRange(this->axis[i]->dom);
-    CHECK(!out_dom_map->count(this->axis[i]));
+    ICHECK(!out_dom_map->count(this->axis[i]));
     (*out_dom_map)[this->axis[i]] = r;
   }
   for (size_t i = 0; i < this->reduce_axis.size(); ++i) {
-    CHECK(!out_dom_map->count(this->reduce_axis[i]));
+    ICHECK(!out_dom_map->count(this->reduce_axis[i]));
     (*out_dom_map)[this->reduce_axis[i]] = this->reduce_axis[i]->dom;
   }
 }
@@ -261,7 +261,7 @@ void BaseComputeOpNode::GatherBound(const Operation& self,
 Stmt BaseComputeOpNode::BuildRealize(const Stage& stage,
                                      const std::unordered_map& realize_map,
                                      const Stmt& body) const {
-  CHECK_EQ(stage->op.get(), this);
+  ICHECK_EQ(stage->op.get(), this);
   Region bounds;
   for (IterVar iv : this->axis) {
     bounds.push_back(realize_map.at(iv));
@@ -301,9 +301,9 @@ void MakeReduction(const ComputeOpNode* op, const Array& tensors, Stmt*
   size_t size = op->body.size();
   const ReduceNode* reduce = op->body[0].as<ReduceNode>();
-  CHECK(reduce);
+  ICHECK(reduce);
   const CommReducerNode* combiner = reduce->combiner.as<CommReducerNode>();
-  CHECK(combiner);
+  ICHECK(combiner);
   Array lhs;
   for (size_t i = 0; i < size; ++i) {
     lhs.push_back(tensors[i](args));
@@ -405,11 +405,11 @@ ComputeType DetectComputeType(const ComputeOpNode* self, const Stage& stage) {
         ++normal_red;
       }
     } else {
-      CHECK_EQ(thread_red, 0) << "Cross thread reduce cannot swap with normal data axis";
+      ICHECK_EQ(thread_red, 0) << "Cross thread reduce cannot swap with normal data axis";
     }
   }
   if (tensorize != 0) {
-    CHECK(thread_red == 0) << "Cannot mix cross thread reduction with Tensorize";
+    ICHECK(thread_red == 0) << "Cannot mix cross thread reduction with Tensorize";
     return ComputeType::kTensorize;
   }
   if (thread_red != 0) {
@@ -423,7 +423,7 @@ ComputeType DetectComputeType(const ComputeOpNode* self, const Stage& stage) {
 Stmt ComputeOpNode::BuildProvide(const Stage& stage,
                                  const std::unordered_map& dom_map,
                                  bool debug_keep_trivial_loop) const {
-  CHECK_EQ(stage->op.operator->(), this);
+  ICHECK_EQ(stage->op.operator->(), this);
   ComputeType ctype = DetectComputeType(this, stage);
   if (ctype == ComputeType::kCrossThreadReduction) {
     // specially handle cross thread reduction.
@@ -438,7 +438,7 @@ Stmt ComputeOpNode::BuildProvide(const Stage& stage,
 ComputeLoopNest ComputeLoopNest::Create(const BaseComputeOpNode* self, const Stage& stage,
                                         const std::unordered_map& dom_map,
                                         bool debug_keep_trivial_loop) {
-  CHECK_EQ(stage->op.operator->(), self);
+  ICHECK_EQ(stage->op.operator->(), self);
   ComputeLoopNest ret;
   // make main loop nest
   ret.main_nest = MakeLoopNest(stage, dom_map, 0, false, std::unordered_set(),
@@ -489,7 +489,7 @@ ComputeLoopNest ComputeLoopNest::Create(const BaseComputeOpNode* self, const Sta
       e = likely(e);
     }
   } else {
-    CHECK_EQ(ret.main_nest.size(), stage->leaf_iter_vars.size() + 1);
+    ICHECK_EQ(ret.main_nest.size(), stage->leaf_iter_vars.size() + 1);
     ret.num_common_loop = stage->leaf_iter_vars.size();
   }
   // copy elison here.
@@ -524,12 +524,12 @@ class ComputeVerifier final : protected tir::ExprVisitor {
     for (const PrimExpr e : compute_->body) {
       // Check for consistency of top level reductions
       const tir::ReduceNode* reduce = e.as<tir::ReduceNode>();
-      CHECK((reduce && reduce_) || (!reduce && !reduce_)) << "All ComputeOp should be consistent "
-                                                          << "with being Reduce operation or not.";
+      ICHECK((reduce && reduce_) || (!reduce && !reduce_)) << "All ComputeOp should be consistent "
+                                                           << "with being Reduce operation or not.";
       if (reduce && reduce_) {
-        CHECK(ReduceEqual(reduce, reduce_)) << "The Reduce inputs of ComputeOp should "
-                                            << "have the same attribute except value_index";
+        ICHECK(ReduceEqual(reduce, reduce_)) << "The Reduce inputs of ComputeOp should "
+                                             << "have the same attribute except value_index";
       }
       level_ = 0;
@@ -548,8 +548,8 @@ class ComputeVerifier final : protected tir::ExprVisitor {
   void VisitExpr_(const tir::ReduceNode* op) final {
     // Check for non top level reductions
-    CHECK(0 == level_) << "Reductions are only allowed at the top level of compute. "
-                       << "Please create another tensor for further composition.";
+    ICHECK(0 == level_) << "Reductions are only allowed at the top level of compute. "
+                        << "Please create another tensor for further composition.";
   }
   //@}
@@ -581,7 +581,7 @@ Stmt TransformUpdate(const Stage& stage, const std::unordered_map
     if (iv->iter_type == kCommReduce) {
       auto vit = dom_map.find(iv);
-      CHECK(vit != dom_map.end());
+      ICHECK(vit != dom_map.end());
       const Range& vrange = vit->second;
       conds.push_back(likely(iv->var > vrange->min));
       banned.insert(iv->var.get());
diff --git a/src/te/operation/cross_thread_reduction.cc b/src/te/operation/cross_thread_reduction.cc
index 6aba9ab500b6..b0fb9b667558 100644
--- a/src/te/operation/cross_thread_reduction.cc
+++ b/src/te/operation/cross_thread_reduction.cc
@@ -92,12 +92,13 @@ Stmt MakeCrossThreadReduction(const ComputeOpNode* self, const Stage& stage,
                               debug_keep_trivial_loop);
   size_t size = self->body.size();
-  CHECK_GT(size, 0);
+  ICHECK_GT(size, 0);
   std::vector reduces(size);
   for (size_t i = 0; i < size; ++i) {
     const ReduceNode* reduce = self->body[i].as<ReduceNode>();
-    CHECK(reduce);
-    CHECK(reduce->init.empty()) << "Cannot perform cross_thread_reduction for reductions with init";
+    ICHECK(reduce);
+    ICHECK(reduce->init.empty())
+        << "Cannot perform cross_thread_reduction for reductions with init";
     reduces[i] = reduce;
   }
@@ -140,7 +141,7 @@ Stmt MakeCrossThreadReduction(const ComputeOpNode* self, const Stage& stage,
   normal_init.reserve(size);
   normal_update.resize(size);
   const CommReducerNode* combiner = reduces[0]->combiner.as<CommReducerNode>();
-  CHECK(combiner);
+  ICHECK(combiner);
   Array lhs;
   for (size_t i = 0; i < size; ++i) {
     DataType t = reduces[i]->dtype;
diff --git a/src/te/operation/extern_op.cc b/src/te/operation/extern_op.cc
index 2afdd4a93c7e..1c9a3cb336ae 100644
--- a/src/te/operation/extern_op.cc
+++ b/src/te/operation/extern_op.cc
@@ -60,14 +60,14 @@ ExternOp::ExternOp(std::string name, std::string tag, Map att
   n->name = std::move(name);
   n->tag = std::move(tag);
   n->attrs = std::move(attrs);
-  CHECK_EQ(inputs.size(), input_placeholders.size());
+  ICHECK_EQ(inputs.size(), input_placeholders.size());
   for (size_t i = 0; i < inputs.size(); ++i) {
-    CHECK_EQ(inputs[i]->dtype, input_placeholders[i]->dtype);
-    CHECK_EQ(inputs[i]->shape.size(), input_placeholders[i]->shape.size());
+    ICHECK_EQ(inputs[i]->dtype, input_placeholders[i]->dtype);
+    ICHECK_EQ(inputs[i]->shape.size(), input_placeholders[i]->shape.size());
     for (size_t dim = 0; dim < inputs[i]->shape.size(); ++dim) {
-      CHECK(inputs[i]->shape[dim].same_as(input_placeholders[i]->shape[dim]));
+      ICHECK(inputs[i]->shape[dim].same_as(input_placeholders[i]->shape[dim]));
     }
-    CHECK_EQ(input_placeholders[i]->strides.size(), 0U);
+    ICHECK_EQ(input_placeholders[i]->strides.size(), 0U);
   }
   n->inputs = std::move(inputs);
   n->input_placeholders = std::move(input_placeholders);
@@ -87,7 +87,7 @@ Array ExternOpNode::InputTensors() const { return inputs; }
 Operation ExternOpNode::ReplaceInputs(const Operation& self,
                                       const std::unordered_map& rmap) const {
-  CHECK_EQ(self.operator->(), this);
+  ICHECK_EQ(self.operator->(), this);
   auto n = make_object(*this);
   n->body = ReplaceTensor(this->body, rmap);
   for (size_t i = 0; i < n->inputs.size(); ++i) {
@@ -125,7 +125,7 @@ void ExternOpNode::GatherBound(const Operation& self,
 Stmt ExternOpNode::BuildRealize(const Stage& stage,
                                 const std::unordered_map& realize_map,
                                 const Stmt& body) const {
-  CHECK_EQ(stage->op.get(), this);
+  ICHECK_EQ(stage->op.get(), this);
   Stmt realize_body = body;
   for (int k = 0; k < num_outputs(); ++k) {
     Tensor t = stage->op.output(k);
@@ -141,7 +141,7 @@ Stmt ExternOpNode::BuildRealize(const Stage& stage,
 Stmt ExternOpNode::BuildProvide(const Stage& stage,
                                 const std::unordered_map& dom_map,
                                 bool debug_keep_trivial_loop) const {
-  CHECK_EQ(stage->op.operator->(), this);
+  ICHECK_EQ(stage->op.operator->(), this);
   Stmt ret = AttrStmt(make_zero(DataType::Int(32)), tir::attr::extern_scope, 0, this->body);
   auto f_push_bind = [&ret](Buffer buffer, Tensor tensor) {
     Array bind_spec;
diff --git a/src/te/operation/hybrid_op.cc b/src/te/operation/hybrid_op.cc
index 98270e9a2952..94e06d206ddb 100644
--- a/src/te/operation/hybrid_op.cc
+++ b/src/te/operation/hybrid_op.cc
@@ -101,7 +101,7 @@ Array HybridOpNode::InputTensors() const {
 Operation HybridOpNode::ReplaceInputs(const Operation& self,
                                       const std::unordered_map& rmap) const {
-  CHECK_EQ(self.operator->(), this);
+  ICHECK_EQ(self.operator->(), this);
   auto n = make_object(*this);
   n->body = te::ReplaceTensor(this->body, rmap);
   for (size_t i = 0; i < n->inputs.size(); ++i) {
@@ -137,7 +137,7 @@ void HybridOpNode::GatherBound(const Operation& self,
                                const std::unordered_map& tensor_dom,
                                std::unordered_map* out_dom_map) const {
   for (auto iter_var : axis) {
-    CHECK(!out_dom_map->count(iter_var));
+    ICHECK(!out_dom_map->count(iter_var));
     out_dom_map->operator[](iter_var) = iter_var->dom;
   }
 }
@@ -146,7 +146,7 @@ Stmt HybridOpNode::BuildRealize(const Stage& stage,
                                 const std::unordered_map& realize_map,
                                 const Stmt& body) const {
   // TODO(@were): Add attribute inject here and remove it from hybrid parser.
-  CHECK_EQ(stage->op.get(), this);
+  ICHECK_EQ(stage->op.get(), this);
   Stmt realize_body = body;
   for (int k = 0; k < num_outputs(); ++k) {
     Tensor t = stage->op.output(k);
@@ -162,7 +162,7 @@ Stmt HybridOpNode::BuildRealize(const Stage& stage,
 Stmt HybridOpNode::BuildProvide(const Stage& stage,
                                 const std::unordered_map& dom_map,
                                 bool debug_keep_trivial_loop) const {
-  CHECK_EQ(stage->op.operator->(), this);
+  ICHECK_EQ(stage->op.operator->(), this);
   Stmt ret = AttrStmt(make_zero(DataType::Int(32)), tir::attr::extern_scope, 0, this->body);
   std::unordered_map rmap;
   for (int i = 0; i < this->num_outputs(); ++i) {
@@ -213,14 +213,14 @@ Stmt ApplyLoopShapes(const Stage& stage, const std::unordered_map
       parent = split->parent->var.get();
       auto& inner_ = split->inner;
-      CHECK(dom_map.count(inner_));
+      ICHECK(dom_map.count(inner_));
       auto& inner_dom = dom_map.find(inner_)->second;
-      CHECK(is_const_int(inner_dom->min, 0));
+      ICHECK(is_const_int(inner_dom->min, 0));
       auto& outer_ = split->outer;
-      CHECK(dom_map.count(outer_));
+      ICHECK(dom_map.count(outer_));
       auto& outer_dom = dom_map.find(outer_)->second;
-      CHECK(is_const_int(outer_dom->min, 0));
+      ICHECK(is_const_int(outer_dom->min, 0));
       inner = IterVar(inner_dom, inner_->var, inner_->iter_type);
       outer = IterVar(outer_dom, outer_->var, outer_->iter_type);
@@ -264,7 +264,7 @@ Stmt ApplyLoopShapes(const Stage& stage, const std::unordered_map
     } else if (op->loop_var.get() == inner) {
-      CHECK(under_outer);
+      ICHECK(under_outer);
       std::unordered_map rmap;
       rmap[op->loop_var.get()] = indexmod(parent, op->extent);
       extent = op->extent;
@@ -295,11 +295,11 @@ Stmt ApplyLoopShapes(const Stage& stage, const std::unordered_map
     if (const SplitNode* split = rel.as<SplitNode>()) {
       LoopSpliter Spliter(split, dom_map);
       stmt = Spliter(stmt);
-      CHECK(Spliter.splitted);
+      ICHECK(Spliter.splitted);
     } else if (const FuseNode* fuse = rel.as<FuseNode>()) {
       LoopFuser Fuser(fuse);
       stmt = Fuser(stmt);
-      CHECK(Fuser.fused);
+      ICHECK(Fuser.fused);
     }
   }
@@ -322,8 +322,8 @@ Stmt ApplyLoopAnnotations(const Stage& stage, const std::unordered_map
     if (attr->bind_thread.defined()) {
       const auto& iter_var = attr->bind_thread;
       if (iter_var->dom.defined()) {
-        CHECK(is_const_int(iter_var->dom->min, 0));
-        CHECK(expr_equal(iter_var->dom->extent, op->extent))
+        ICHECK(is_const_int(iter_var->dom->min, 0));
+        ICHECK(expr_equal(iter_var->dom->extent, op->extent))
            << "Thread extent and loop extent mismatch!\n";
       }
       std::unordered_map rmap;
@@ -361,7 +361,7 @@ Stmt ApplyLoopAnnotations(const Stage& stage, const std::unordered_map
   });
   std::reverse(current_order.begin(), current_order.end());
   auto& required_ord = stage->leaf_iter_vars;
-  CHECK_EQ(current_order.size(), required_ord.size()) << "Cannot reorder the loops!";
+  ICHECK_EQ(current_order.size(), required_ord.size()) << "Cannot reorder the loops!";
   std::unordered_map reorder;
   bool need_reorder = false;
   for (size_t i = 0; i < current_order.size(); ++i) {
     auto& current = current_order[i];
     const IterVar& iter_var = required_ord[i];
rebased.find(iter_var)->second : iter_var; - CHECK(required->dom.defined() || dom_map.count(required)) << required << "\n"; + ICHECK(required->dom.defined() || dom_map.count(required)) << required << "\n"; reorder[current] = required; if (current != required->var.get()) { need_reorder = true; @@ -404,7 +404,7 @@ Stmt ApplyLoopOrder(const Stage& stage, const std::unordered_map Stmt VisitStmt_(const ForNode* op) final { // Reorder from in to out Stmt body_ = this->VisitStmt(op->body); - CHECK(reorder.count(op->loop_var.get())); + ICHECK(reorder.count(op->loop_var.get())); auto target = reorder.find(op->loop_var.get())->second; if (body_.same_as(op->body) && op->loop_var.get() == target->var.get()) return GetRef(op); @@ -431,8 +431,8 @@ Stmt ApplySchedule(const Stage& stage, const std::unordered_map& for (auto rel : stage->relations) { if (const auto* rebase = rel.as()) { rebased[rebase->rebased] = rebase->parent; - CHECK(rebase->parent->dom.defined()); - CHECK(dom_map.count(rebase->rebased)); + ICHECK(rebase->parent->dom.defined()); + ICHECK(dom_map.count(rebase->rebased)); } } stmt = ApplyLoopShapes(stage, dom_map, stmt); diff --git a/src/te/operation/op_utils.cc b/src/te/operation/op_utils.cc index 80f7fe2b4e41..f1991c181e67 100644 --- a/src/te/operation/op_utils.cc +++ b/src/te/operation/op_utils.cc @@ -100,7 +100,7 @@ std::vector > MakeLoopNest(const Stage& stage, default: LOG(FATAL) << "Unknown iter type" << it_attr->iter_type << " in the iter_var_attrs"; } - CHECK_EQ(it_attr->pragma_keys.size(), it_attr->pragma_values.size()); + ICHECK_EQ(it_attr->pragma_keys.size(), it_attr->pragma_values.size()); for (size_t k = 0; k < it_attr->pragma_keys.size(); ++k) { const std::string& pkey = it_attr->pragma_keys[k].as()->value; PrimExpr pvalue = it_attr->pragma_values[k]; @@ -125,8 +125,8 @@ std::vector > MakeLoopNest(const Stage& stage, nest[i + 1].emplace_back(LetStmt(var, new_value, no_op)); } if (it_attr.defined() && it_attr->prefetch_data.size() != 0) { - CHECK(!is_one(dom->extent)) << "Cannot prefetch on trivial loop with extent=1"; - CHECK_EQ(it_attr->prefetch_data.size(), it_attr->prefetch_offset.size()); + ICHECK(!is_one(dom->extent)) << "Cannot prefetch on trivial loop with extent=1"; + ICHECK_EQ(it_attr->prefetch_data.size(), it_attr->prefetch_offset.size()); for (size_t j = 0; j < it_attr->prefetch_data.size(); ++j) { nest[i + 1].emplace_back(AttrStmt(it_attr->prefetch_data[j], tir::attr::prefetch_scope, it_attr->prefetch_offset[j], no_op)); @@ -135,23 +135,23 @@ std::vector > MakeLoopNest(const Stage& stage, } else if (bind_iv->thread_tag == "vthread" || bind_iv->thread_tag == "cthread") { // virtual thread // Always restrict threaded IterVar to starts from 0. - CHECK(is_zero(dom->min)); - CHECK(is_positive_const(dom->extent)); + ICHECK(is_zero(dom->min)); + ICHECK(is_positive_const(dom->extent)); // annotate the extent of the IterVar nest[i + 1].emplace_back(AttrStmt(bind_iv, tir::attr::virtual_thread, dom->extent, no_op)); value_map[iv] = var; } else if (bind_iv->thread_tag == "pipeline") { // pipeline marker. - CHECK(is_zero(dom->min)); - CHECK(is_one(dom->extent)); + ICHECK(is_zero(dom->min)); + ICHECK(is_one(dom->extent)); // annotate the extent of the IterVar nest[i + 1].emplace_back( AttrStmt(bind_iv, tir::attr::pipeline_exec_scope, dom->extent, no_op)); value_map[iv] = dom->min; } else { // Always restrict threaded IterVar to starts from 0. 
- CHECK(is_zero(dom->min)) << "Itervar " << iv << " must start at zero, but it starts at " - << dom->min; + ICHECK(is_zero(dom->min)) << "IterVar " << iv << " must start at zero, but it starts at " + << dom->min; // annotate the extent of the IterVar nest[i + 1].emplace_back(AttrStmt(bind_iv, tir::attr::thread_extent, dom->extent, no_op)); if (!debug_keep_trivial_loop && is_one(dom->extent)) { @@ -205,7 +205,7 @@ class TensorReplacer : public tir::StmtExprMutator { PrimExpr VisitExpr_(const tir::ProducerLoadNode* op) final { PrimExpr expr = StmtExprMutator::VisitExpr_(op); op = expr.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); Tensor t = Downcast(op->producer); auto it = vmap_.find(t); diff --git a/src/te/operation/placeholder_op.cc b/src/te/operation/placeholder_op.cc index 5b7ede314e49..c51e53e16cd1 100644 --- a/src/te/operation/placeholder_op.cc +++ b/src/te/operation/placeholder_op.cc @@ -41,12 +41,12 @@ int PlaceholderOpNode::num_outputs() const { return 1; } Array PlaceholderOpNode::root_iter_vars() const { return {}; } DataType PlaceholderOpNode::output_dtype(size_t i) const { - CHECK_EQ(i, 0U); + ICHECK_EQ(i, 0U); return dtype; } Array PlaceholderOpNode::output_shape(size_t i) const { - CHECK_EQ(i, 0U); + ICHECK_EQ(i, 0U); return shape; } diff --git a/src/te/operation/scan_op.cc b/src/te/operation/scan_op.cc index 726714580b78..a555e86097b7 100644 --- a/src/te/operation/scan_op.cc +++ b/src/te/operation/scan_op.cc @@ -51,7 +51,7 @@ Array ScanOpNode::root_iter_vars() const { DataType ScanOpNode::output_dtype(size_t i) const { return update[i]->dtype; } Array ScanOpNode::output_shape(size_t i) const { - CHECK_LT(i, state_placeholder.size()); + ICHECK_LT(i, state_placeholder.size()); return state_placeholder[i]->shape; } @@ -62,27 +62,27 @@ ScanOp::ScanOp(std::string name, std::string tag, Map attrs, attrs = Map(); } auto n = make_object(); - CHECK_EQ(init.size(), update.size()); - CHECK_EQ(init.size(), state_placeholder.size()); + ICHECK_EQ(init.size(), update.size()); + ICHECK_EQ(init.size(), state_placeholder.size()); arith::Analyzer analyzer; auto prove_equal = [&](PrimExpr lhs, PrimExpr rhs) { return is_zero(analyzer.Simplify(lhs - rhs)); }; for (size_t i = 0; i < init.size(); ++i) { - CHECK_EQ(init[i]->dtype, state_placeholder[i]->dtype); - CHECK_EQ(init[i]->dtype, update[i]->dtype); - CHECK(prove_equal(init[i]->shape[0], axis->dom->min)) + ICHECK_EQ(init[i]->dtype, state_placeholder[i]->dtype); + ICHECK_EQ(init[i]->dtype, update[i]->dtype); + ICHECK(prove_equal(init[i]->shape[0], axis->dom->min)) << "init.shape[0] needs to match scan_axis.dom.min"; - CHECK(prove_equal(state_placeholder[i]->shape[0], axis->dom->min + axis->dom->extent)) + ICHECK(prove_equal(state_placeholder[i]->shape[0], axis->dom->min + axis->dom->extent)) << "state_placeholder.shape[0] needs to match" << " scan_axis.dom.min + scan_axis.dom.extent"; - CHECK_EQ(state_placeholder[i].ndim(), init[i].ndim()) + ICHECK_EQ(state_placeholder[i].ndim(), init[i].ndim()) << "The dimension of init needs to match state_placeholder"; - CHECK_EQ(update[i].ndim(), state_placeholder[i].ndim()) + ICHECK_EQ(update[i].ndim(), state_placeholder[i].ndim()) << "The update.ndim needs to be state_placeholder.ndim - 1"; for (size_t k = 0; k < update[i].ndim(); ++k) { - CHECK(prove_equal(update[i]->shape[k], state_placeholder[i]->shape[k])); + ICHECK(prove_equal(update[i]->shape[k], state_placeholder[i]->shape[k])); if (k != 0) { // setup spatial axis std::ostringstream spatial_name; @@ -93,7 +93,7 @@ ScanOp::ScanOp(std::string
name, std::string tag, Map attrs, } for (size_t k = 1; k < init[i].ndim(); ++k) { - CHECK(prove_equal(init[i]->shape[k], state_placeholder[i]->shape[k])); + ICHECK(prove_equal(init[i]->shape[k], state_placeholder[i]->shape[k])); } } n->name = std::move(name); @@ -141,7 +141,7 @@ Array ScanOpNode::InputTensors() const { Operation ScanOpNode::ReplaceInputs(const Operation& self, const std::unordered_map& rmap) const { - CHECK_EQ(self.operator->(), this); + ICHECK_EQ(self.operator->(), this); auto n = make_object(*this); for (size_t i = 0; i < n->init.size(); ++i) { if (rmap.count(n->init[i])) { @@ -161,7 +161,7 @@ Operation ScanOpNode::ReplaceInputs(const Operation& self, void ScanOpNode::PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map& dom_map, std::unordered_map* out_dom_map) const { - CHECK_EQ(self.operator->(), this); + ICHECK_EQ(self.operator->(), this); for (size_t i = 0, sp_idx = 0; i < this->init.size(); ++i) { TensorDom* init_dom = nullptr; TensorDom* update_dom = nullptr; @@ -195,8 +195,8 @@ void ScanOpNode::PropBoundToInputs(const Operation& self, arith::Analyzer* analy void ScanOpNode::GatherBound(const Operation& self, const std::unordered_map& tensor_dom, std::unordered_map* out_dom_map) const { - CHECK_EQ(self.operator->(), this); - CHECK(!out_dom_map->count(this->scan_axis)); + ICHECK_EQ(self.operator->(), this); + ICHECK(!out_dom_map->count(this->scan_axis)); std::vector output(this->num_outputs()); for (size_t i = 0; i < output.size(); ++i) { output[i] = self.output(i); @@ -207,7 +207,7 @@ void ScanOpNode::GatherBound(const Operation& self, const TensorDom& d = tensor_dom.at(output[i]); time_dom.insert(time_dom.end(), d.data[0].begin(), d.data[0].end()); } - CHECK(!out_dom_map->count(this->scan_axis)); + ICHECK(!out_dom_map->count(this->scan_axis)); arith::Analyzer analyzer; Range sdom = this->scan_axis->dom; Range r = arith::Union(time_dom).CoverRange(sdom); @@ -220,8 +220,8 @@ void ScanOpNode::GatherBound(const Operation& self, const TensorDom& d = tensor_dom.at(output[i]); for (size_t k = 1; k < this->update[i]->shape.size(); ++k, ++sp_idx) { IterVar sp_ax = this->spatial_axis_[sp_idx]; - CHECK(!out_dom_map->count(sp_ax)); - CHECK(fix_pt.count(sp_ax)); + ICHECK(!out_dom_map->count(sp_ax)); + ICHECK(fix_pt.count(sp_ax)); if (fix_pt[sp_ax].as()->value) { // fix point, we can slice it. 
(*out_dom_map)[sp_ax] = arith::Union(d.data[k]).CoverRange(sp_ax->dom); @@ -236,14 +236,14 @@ void ScanOpNode::GatherBound(const Operation& self, Stmt ScanOpNode::BuildRealize(const Stage& stage, const std::unordered_map& dom_map, const Stmt& body) const { arith::Analyzer analyzer; - CHECK_EQ(stage->op.get(), this); + ICHECK_EQ(stage->op.get(), this); Range sdom = dom_map.at(this->scan_axis); Range tdom = Range::FromMinExtent(0, analyzer.Simplify(sdom->extent + sdom->min)); Stmt ret = body; size_t sp_idx = 0; for (size_t i = 0; i < update.size(); ++i) { Tensor t = stage->op.output(i); - CHECK_EQ(static_cast(t->value_index), i); + ICHECK_EQ(static_cast(t->value_index), i); Region bounds; bounds.push_back(tdom); for (size_t k = 1; k < this->update[i]->shape.size(); ++k, ++sp_idx) { @@ -257,14 +257,14 @@ Stmt ScanOpNode::BuildRealize(const Stage& stage, const std::unordered_map& dom_map, bool debug_keep_trivial_loop) const { - CHECK_EQ(stage->op.operator->(), this); + ICHECK_EQ(stage->op.operator->(), this); Stmt provide = AttrStmt(stage->op, tir::attr::scan_update_scope, this->scan_axis->var, Evaluate(0)); Stmt init = AttrStmt(stage->op, tir::attr::scan_init_scope, 0, Evaluate(0)); size_t begin_scan = 0; for (size_t i = 0; i < stage->leaf_iter_vars.size(); ++i) { if (stage->leaf_iter_vars[i]->iter_type == kThreadIndex) { - CHECK_EQ(begin_scan, i); + ICHECK_EQ(begin_scan, i); begin_scan = i + 1; } } diff --git a/src/te/operation/tensor_compute_op.cc b/src/te/operation/tensor_compute_op.cc index ecb2e860c3e6..262e5a2b97f4 100644 --- a/src/te/operation/tensor_compute_op.cc +++ b/src/te/operation/tensor_compute_op.cc @@ -83,7 +83,7 @@ Array TensorComputeOpNode::InputTensors() const { return inputs; } Operation TensorComputeOpNode::ReplaceInputs(const Operation& self, const std::unordered_map& rmap) const { - CHECK_EQ(self.operator->(), this); + ICHECK_EQ(self.operator->(), this); auto n = make_object(*this); auto intrin = make_object(*(this->intrin.operator->())); intrin->body = ReplaceTensor(this->intrin->body, rmap); @@ -132,7 +132,7 @@ size_t TensorComputeOpNode::num_schedulable_dims() const { return schedulable_nd Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, const std::unordered_map& dom_map, bool debug_keep_trivial_loop) const { - CHECK_EQ(stage->op.operator->(), this); + ICHECK_EQ(stage->op.operator->(), this); // Start bind data. Stmt nop = Evaluate(0); @@ -194,7 +194,7 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, PrimExpr esp = sp; sp_expr.push_back(esp); } - CHECK_EQ(sp_expr.size(), user_expr.size()); + ICHECK_EQ(sp_expr.size(), user_expr.size()); // TODO(jdavies-huawei): what name should be used here? 
binder.BindArray(sp_expr, user_expr, this->name); @@ -204,8 +204,8 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, if (this->reduce_axis.size() == 0) { std::vector > nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); nest.emplace_back(MakeIfNest(n.main_predicates)); - CHECK_EQ(n.init_predicates.size(), 0U); - CHECK(this->intrin->body.defined()) + ICHECK_EQ(n.init_predicates.size(), 0U); + ICHECK(this->intrin->body.defined()) << "Normal store op for intrin " << this << " is not defined"; Stmt body = MergeNest(output_bind_nest, this->intrin->body); body = MergeNest(input_bind_nest, body); @@ -216,9 +216,9 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, return ret; } else { // Need to split reduction - CHECK(this->intrin->reduce_update.defined()) << "Reduction update op is not defined"; + ICHECK(this->intrin->reduce_update.defined()) << "Reduction update op is not defined"; // Need init and update steps - CHECK_NE(this->reduce_axis.size(), 0U); + ICHECK_NE(this->reduce_axis.size(), 0U); std::vector > common(n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1); std::vector > update_nest(n.main_nest.begin() + n.num_common_loop + 1, @@ -243,7 +243,7 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, return MergeNest(common, SeqStmt::Flatten(init, update)); } else { // When init op is not available, use body op for reset in the first iter. - CHECK(this->intrin->body.defined()) << "Normal body op is not defined"; + ICHECK(this->intrin->body.defined()) << "Normal body op is not defined"; Stmt update = TransformUpdate(stage, dom_map, n, this->intrin->body, this->intrin->reduce_update); update = MergeNest(output_bind_nest, update); diff --git a/src/te/operation/tensorize.cc b/src/te/operation/tensorize.cc index 9733cd940a9f..bfd1ec579818 100644 --- a/src/te/operation/tensorize.cc +++ b/src/te/operation/tensorize.cc @@ -50,14 +50,14 @@ size_t InferTensorizeRegion(const ComputeOpNode* self, const Stage& stage, // Loop over the leaves for (size_t i = stage->leaf_iter_vars.size(); i != 0; --i) { IterVar iv = stage->leaf_iter_vars[i - 1]; - CHECK(iv->iter_type == kDataPar || iv->iter_type == kCommReduce); + ICHECK(iv->iter_type == kDataPar || iv->iter_type == kCommReduce); auto vit = dom_map.find(iv); - CHECK(vit != dom_map.end()); + ICHECK(vit != dom_map.end()); const Range& vrange = vit->second; if (is_one(vrange->extent)) { up_state[iv] = IntSet::SinglePoint(vrange->min); } else if (found_point) { - CHECK(is_zero(vrange->min)); + ICHECK(is_zero(vrange->min)); up_state[iv] = IntSet::SinglePoint(iv->var); } else { up_state[iv] = IntSet::FromRange(vrange); @@ -66,16 +66,16 @@ size_t InferTensorizeRegion(const ComputeOpNode* self, const Stage& stage, if (iit != stage->iter_var_attrs.end()) { const IterVarAttr& attr = (*iit).second; if (!found_point) { - CHECK(!attr->bind_thread.defined()) << "Do not allow thread in tensorize scope"; + ICHECK(!attr->bind_thread.defined()) << "Do not allow thread in tensorize scope"; } if (attr->iter_type == kTensorized) { - CHECK(!found_point) << "Do not allow two tensorized point"; + ICHECK(!found_point) << "Do not allow two tensorized points"; found_point = true; loc_scope = i - 1; } } } - CHECK(found_point); + ICHECK(found_point); // Get domain of the tensorized scope.
te::PassUpDomain(stage, dom_map, &up_state); // Get domains of inputs @@ -101,7 +101,7 @@ size_t InferTensorizeRegion(const ComputeOpNode* self, const Stage& stage, const Tensor& t = kv.first; for (size_t i = 0; i < t.ndim(); ++i) { Range r = arith::Union(kv.second.data.at(i)).CoverRange(none); - CHECK(r.defined()) << "cannot deduce region of tensorized scope for input " << t; + ICHECK(r.defined()) << "cannot deduce region of tensorized scope for input " << t; vec.push_back(std::move(r)); } (*in_region)[t] = std::move(vec); @@ -113,8 +113,8 @@ void VerifyTensorizeLoopNest(const ComputeOpNode* self, const Stage& stage, const ComputeLoopNest& n, size_t tloc) { // Verification step. std::unordered_set banned; - CHECK_EQ(n.main_nest.size(), stage->leaf_iter_vars.size() + 1); - CHECK(n.init_nest.size() == stage->leaf_iter_vars.size() + 1 || n.init_nest.size() == 0); + ICHECK_EQ(n.main_nest.size(), stage->leaf_iter_vars.size() + 1); + ICHECK(n.init_nest.size() == stage->leaf_iter_vars.size() + 1 || n.init_nest.size() == 0); auto f_push_banned = [&banned](const Stmt& s) { if (const ForNode* op = s.as()) { banned.insert(op->loop_var.get()); @@ -163,7 +163,7 @@ class TensorIntrinMatcher final : public StmtExprMutator { auto it = in_remap_.find(t); if (it != in_remap_.end()) { const InputEntry& e = it->second; - CHECK_EQ(op->indices.size(), e.region.size()); + ICHECK_EQ(op->indices.size(), e.region.size()); Array indices; for (size_t i = e.start; i < e.region.size(); ++i) { indices.push_back(op->indices[i] - e.region[i]->min); @@ -200,7 +200,7 @@ class TensorIntrinMatcher final : public StmtExprMutator { const std::unordered_map& out_dom, const std::unordered_map >& in_region, const TensorIntrin& intrin, Map* compute_intrin_iter_space) { - CHECK(self == stage->op.get()); + ICHECK(self == stage->op.get()); for (size_t i = 0; i < stage->leaf_iter_vars.size(); ++i) { IterVar iv = stage->leaf_iter_vars[i]; @@ -214,17 +214,17 @@ class TensorIntrinMatcher final : public StmtExprMutator { // input remap.
Array inputs = self->InputTensors(); - CHECK_EQ(inputs.size(), intrin->inputs.size()); + ICHECK_EQ(inputs.size(), intrin->inputs.size()); for (size_t i = 0; i < inputs.size(); ++i) { InputEntry e; e.tensor = intrin->inputs[i]; e.region = Array(in_region.at(inputs[i])); - CHECK_GE(e.region.size(), e.tensor.ndim()); + ICHECK_GE(e.region.size(), e.tensor.ndim()); // Enable fuzzy matching, to match [1, n, m] to [n, m] e.start = e.region.size() - e.tensor.ndim(); for (size_t j = 0; j < e.start; ++j) { auto canonical_extent = analyzer_.Simplify(e.region[j]->extent); - CHECK(is_one(canonical_extent)) + ICHECK(is_one(canonical_extent)) << "Tensorize " << intrin->name << ":" << " Input dimension mismatch with tensor intrin " << " expected shape=" << e.tensor->shape << ", given region=" << e.region; @@ -233,16 +233,16 @@ class TensorIntrinMatcher final : public StmtExprMutator { } // output remap const ComputeOpNode* intrin_compute = intrin->op.as(); - CHECK(intrin_compute) << "Only support compute intrinsic for now"; - CHECK_GE(self->axis.size(), intrin_compute->axis.size()) + ICHECK(intrin_compute) << "Only support compute intrinsic for now"; + ICHECK_GE(self->axis.size(), intrin_compute->axis.size()) << "Tensorize: Output mismatch with tensor intrin "; // Enable fuzzy matching, to match [1, n, m] to [n, m] size_t axis_start = self->axis.size() - intrin_compute->axis.size(); for (size_t i = 0; i < axis_start; ++i) { Range r = out_dom.at(self->axis[i]); - CHECK(is_one(r->extent)) << "Tensorize: Output mismatch with tensor intrin " - << " intrin-dim=" << intrin_compute->axis.size() - << ", tensorize-dim=" << self->axis.size(); + ICHECK(is_one(r->extent)) << "Tensorize: Output mismatch with tensor intrin " - << " intrin-dim=" << intrin_compute->axis.size() - << ", tensorize-dim=" << self->axis.size(); var_remap_[self->axis[i]->var.get()] = r->min; } // Assume we tensorize at region axis i [min, min + extent) @@ -257,14 +257,14 @@ class TensorIntrinMatcher final : public StmtExprMutator { compute_intrin_iter_space->Set(target_iv->var, target_iv->dom); } // Remap reduction axis - CHECK_GE(self->reduce_axis.size(), intrin_compute->reduce_axis.size()) + ICHECK_GE(self->reduce_axis.size(), intrin_compute->reduce_axis.size()) << "Tensorize: Reduction dimension mismatch with tensor intrin"; axis_start = self->reduce_axis.size() - intrin_compute->reduce_axis.size(); for (size_t i = 0; i < axis_start; ++i) { Range r = out_dom.at(self->reduce_axis[i]); - CHECK(is_one(r->extent)) << "Tensorize: Reduction mismatch with tensor intrin " - << " intrin-dim=" << intrin_compute->reduce_axis.size() - << ", tensorize-dim=" << self->reduce_axis.size(); + ICHECK(is_one(r->extent)) << "Tensorize: Reduction mismatch with tensor intrin " + << " intrin-dim=" << intrin_compute->reduce_axis.size() + << ", tensorize-dim=" << self->reduce_axis.size(); var_remap_[self->reduce_axis[i]->var.get()] = r->min; } for (size_t i = axis_start; i < self->reduce_axis.size(); ++i) { @@ -320,8 +320,8 @@ void VerifyTensorizeBody(const ComputeOpNode* self, const Stage& stage, Array body = MatchTensorizeBody(self, stage, dom_map, out_dom, in_region, intrin, &compute_intrin_iter_space); const ComputeOpNode* intrin_compute = intrin->op.as(); - CHECK(intrin_compute) << "Only support compute intrinsic for now"; - CHECK_EQ(body.size(), intrin_compute->body.size()) << "Tensorize failed: body size mismatch"; + ICHECK(intrin_compute) << "Only support compute intrinsic for now"; + ICHECK_EQ(body.size(), intrin_compute->body.size()) << "Tensorize failed: 
body size mismatch"; arith::Analyzer ana; ana.Bind(compute_intrin_iter_space); @@ -333,9 +333,9 @@ void VerifyTensorizeBody(const ComputeOpNode* self, const Stage& stage, << "'s declaration " << " provided=" << lhs.dtype() << ", intrin=" << rhs.dtype(); } - CHECK(expr_equal(lhs, rhs)) << "Failed to match the compute with TensorIntrin " << intrin->name - << "'s declaration " - << " provided= " << lhs << ", intrin= " << rhs; + ICHECK(expr_equal(lhs, rhs)) << "Failed to match the compute with TensorIntrin " << intrin->name + << "'s declaration " + << " provided= " << lhs << ", intrin= " << rhs; } } @@ -346,7 +346,7 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, std::unordered_map > in_region; size_t tloc = InferTensorizeRegion(self, stage, dom_map, &out_dom, &in_region); TensorIntrin intrin = stage->iter_var_attrs.at(stage->leaf_iter_vars[tloc])->tensor_intrin; - CHECK(intrin.defined()); + ICHECK(intrin.defined()); ComputeLoopNest n = ComputeLoopNest::Create(self, stage, dom_map, debug_keep_trivial_loop); VerifyTensorizeLoopNest(self, stage, n, tloc); VerifyTensorizeBody(self, stage, dom_map, out_dom, in_region, intrin); @@ -354,14 +354,14 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, Stmt nop = Evaluate(0); std::vector input_bind_nest, output_bind_nest; Array inputs = self->InputTensors(); - CHECK_EQ(inputs.size(), intrin->inputs.size()) << "Tensorize failed: input size mismatch "; + ICHECK_EQ(inputs.size(), intrin->inputs.size()) << "Tensorize failed: input size mismatch "; // input binding for (size_t i = 0; i < intrin->inputs.size(); ++i) { Tensor tensor = inputs[i]; Buffer buffer = intrin->buffers[i]; Array bind_spec{buffer, tensor}; auto it = in_region.find(tensor); - CHECK(it != in_region.end()); + ICHECK(it != in_region.end()); const Array& region = it->second; Array tuple; for (const Range r : region) { @@ -374,13 +374,13 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, } // output binding const ComputeOpNode* intrin_compute = intrin->op.as(); - CHECK(intrin_compute) << "Only support compute intrinsic for now"; - CHECK_EQ(intrin->inputs.size() + intrin_compute->body.size(), intrin->buffers.size()); - CHECK_EQ(intrin_compute->body.size(), self->body.size()); + ICHECK(intrin_compute) << "Only support compute intrinsic for now"; + ICHECK_EQ(intrin->inputs.size() + intrin_compute->body.size(), intrin->buffers.size()); + ICHECK_EQ(intrin_compute->body.size(), self->body.size()); Array tuple; for (IterVar iv : self->axis) { auto it = out_dom.find(iv); - CHECK(it != out_dom.end()); + ICHECK(it != out_dom.end()); tuple.push_back(it->second->min); tuple.push_back(it->second->extent); } @@ -395,20 +395,20 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, // Check variable remap std::unordered_map vmap; tir::ArgBinder binder(&vmap); - CHECK_GE(self->reduce_axis.size(), intrin_compute->reduce_axis.size()) + ICHECK_GE(self->reduce_axis.size(), intrin_compute->reduce_axis.size()) << "Tensorization fail: reduction axis size do not match"; size_t start = self->reduce_axis.size() - intrin_compute->reduce_axis.size(); for (size_t i = 0; i < start; ++i) { IterVar iv = self->reduce_axis[i]; auto it = out_dom.find(iv); - CHECK(it != out_dom.end()); - CHECK(is_one(it->second->extent)) << "Tensorization fail: reduction axis size do not match"; + ICHECK(it != out_dom.end()); + ICHECK(is_one(it->second->extent)) << "Tensorization fail: reduction axis size do not match"; } for (size_t i = start; i < self->reduce_axis.size(); 
++i) { IterVar iv = self->reduce_axis[i]; IterVar target = intrin_compute->reduce_axis[i - start]; auto it = out_dom.find(iv); - CHECK(it != out_dom.end()); + ICHECK(it != out_dom.end()); binder.Bind(target->dom->min, make_const(iv->dom->min.dtype(), 0), "tensir_intrin.reduction.min"); binder.Bind(target->dom->extent, it->second->extent, "tensir_intrin.reduction.extent"); @@ -417,8 +417,8 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, // No need to split reduction std::vector > nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); nest.emplace_back(MakeIfNest(n.main_predicates)); - CHECK_EQ(n.init_predicates.size(), 0U); - CHECK(intrin->body.defined()) << "Normal store op for intrin " << intrin << " is not defined"; + ICHECK_EQ(n.init_predicates.size(), 0U); + ICHECK(intrin->body.defined()) << "Normal store op for intrin " << intrin << " is not defined"; Stmt body = MergeNest(output_bind_nest, intrin->body); body = MergeNest(input_bind_nest, body); body = tir::Substitute(body, vmap); @@ -427,10 +427,10 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, return MergeNest(nest, body); } else { // Need to split reduction - CHECK(intrin->reduce_update.defined()) + ICHECK(intrin->reduce_update.defined()) << "Reduction update op for intrin " << intrin << " is not defined"; // Need init and update steps - CHECK_NE(self->reduce_axis.size(), 0U); + ICHECK_NE(self->reduce_axis.size(), 0U); std::vector > common(n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1); std::vector > update_nest(n.main_nest.begin() + n.num_common_loop + 1, @@ -455,7 +455,7 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, return MergeNest(common, SeqStmt::Flatten(init, update)); } else { // When init op is not available, use body op for reset in the first iter.
- CHECK(intrin->body.defined()) << "Normal body op for intrin " << intrin << " is not defined"; + ICHECK(intrin->body.defined()) << "Normal body op for intrin " << intrin << " is not defined"; Stmt update = TransformUpdate(stage, dom_map, n, intrin->body, intrin->reduce_update); update = MergeNest(output_bind_nest, update); update = MergeNest(input_bind_nest, update); @@ -474,7 +474,7 @@ TVM_REGISTER_GLOBAL("test.op.InferTensorizeRegion").set_body([](TVMArgs args, TV Map dmap = args[1]; std::unordered_map out_dom; std::unordered_map > in_region; - CHECK(stage->op.as()); + ICHECK(stage->op.as()); InferTensorizeRegion(stage->op.as(), stage, as_unordered_map(dmap), &out_dom, &in_region); *ret = Array{Map(out_dom), Map >(in_region)}; @@ -486,7 +486,7 @@ TVM_REGISTER_GLOBAL("test.op.MatchTensorizeBody").set_body([](TVMArgs args, TVMR Map > in_region = args[2]; TensorIntrin intrin = args[3]; Map vrange; - CHECK(stage->op.as()); + ICHECK(stage->op.as()); *ret = MatchTensorizeBody(stage->op.as(), stage, {{}}, as_unordered_map(out_dom), as_unordered_map(in_region), intrin, &vrange); }); diff --git a/src/te/schedule/bound.cc b/src/te/schedule/bound.cc index 83a1caf3c63a..12c9b5538b44 100644 --- a/src/te/schedule/bound.cc +++ b/src/te/schedule/bound.cc @@ -89,16 +89,16 @@ StorageScope InferStorageScope(const Stage& stage, const GraphContext& ctx) { void InferRootBound(const Stage& stage, const GraphContext& ctx, std::unordered_map* rmap) { - CHECK_NE(stage->attach_type, kInline) << "call schedule.normalize before scheduleops"; + ICHECK_NE(stage->attach_type, kInline) << "call schedule.normalize before scheduleops"; if (stage->attach_type == kInlinedAlready) return; if (stage->is_output) { // verify correctness. - CHECK_EQ(stage.GetAttachSpec()->attach_type, kGroupRoot) << "Output must be attached at root"; + ICHECK_EQ(stage.GetAttachSpec()->attach_type, kGroupRoot) << "Output must be attached at root"; } if (stage->is_output || stage->op.as()) { for (auto iv : stage->op->root_iter_vars()) { - CHECK(iv->dom.defined()); - CHECK(!rmap->count(iv)); + ICHECK(iv->dom.defined()); + ICHECK(!rmap->count(iv)); (*rmap)[iv] = iv->dom; } return; @@ -132,7 +132,7 @@ void InferRootBound(const Stage& stage, const GraphContext& ctx, Map relax_set; std::unordered_map up_state; bool found_attach = false; - CHECK(ctx.op2stage_.count(op.get())); + ICHECK(ctx.op2stage_.count(op.get())); const Stage& op_stage = ctx.op2stage_.at(op.get()); // Consumer nest for (size_t i = op_stage->leaf_iter_vars.size(); i != 0; --i) { @@ -141,13 +141,13 @@ void InferRootBound(const Stage& stage, const GraphContext& ctx, found_attach = true; } auto it = rmap->find(iv); - CHECK(it != rmap->end()); + ICHECK(it != rmap->end()); const Range& vrange = it->second; if (is_one(vrange->extent)) { up_state[iv] = IntSet::SinglePoint(vrange->min); } else if (!NeedRelax(iv, found_attach, ctx.bind_map, scope)) { - CHECK(is_zero(vrange->min)) << "InferBound requires every leaf iter var's min equals 0, " - << " call schedule.normalize to achieve this. "; + ICHECK(is_zero(vrange->min)) << "InferBound requires every leaf iter var's min equals 0, " + << " call schedule.normalize to achieve this. 
"; if (ctx.bind_map.count(iv)) { up_state[iv] = IntSet::SinglePoint(ctx.bind_map.at(iv)->var); } else { @@ -163,8 +163,8 @@ void InferRootBound(const Stage& stage, const GraphContext& ctx, found_attach = true; } Range vrange = rmap->at(iv); - CHECK(is_zero(vrange->min)) << "InferBound requires every leaf iter var's min equals 0, " - << "call schedule.normalize to achieve this."; + ICHECK(is_zero(vrange->min)) << "InferBound requires every leaf iter var's min equals 0, " + << "call schedule.normalize to achieve this."; if (NeedRelax(iv, found_attach, ctx.bind_map, scope)) { relax_set.Set(iv->var, IntSet::FromRange(vrange)); if (ctx.bind_map.count(iv)) { @@ -172,7 +172,7 @@ void InferRootBound(const Stage& stage, const GraphContext& ctx, } } } - CHECK(found_attach || stage_attach.size() == 0) + ICHECK(found_attach || stage_attach.size() == 0) << "Invalid Schedule, cannot find the producer " << stage->op << " along the loop nest specified by compute_at of consumer " << op; // Get the domain of the consumer @@ -218,7 +218,7 @@ Map InferBound(const Schedule& sch) { for (Stage stage : sch->stages) { for (auto kv : stage->iter_var_attrs) { if (kv.second->bind_thread.defined()) { - CHECK(!ctx.bind_map.count(kv.first)); + ICHECK(!ctx.bind_map.count(kv.first)); ctx.bind_map[kv.first] = kv.second->bind_thread; } } @@ -242,7 +242,7 @@ Map InferBound(const Schedule& sch) { // pass down to get bound of all iter vars. PassDownDomain(stage, &ret, &analyzer); for (IterVar iv : stage->env_threads) { - CHECK(iv->dom.defined()); + ICHECK(iv->dom.defined()); ret[iv] = iv->dom; } } diff --git a/src/te/schedule/graph.cc b/src/te/schedule/graph.cc index 09e899581d14..502753284da6 100644 --- a/src/te/schedule/graph.cc +++ b/src/te/schedule/graph.cc @@ -174,7 +174,7 @@ AttachPath CreateAttachPath(Schedule sch) { std::unordered_set visited; Array path; for (Stage s = stage; s.defined();) { - CHECK(!visited.count(s.get())) << "Find loop in compute_at attach group"; + ICHECK(!visited.count(s.get())) << "Find loop in compute_at attach group"; visited.insert(s.get()); Stage spec = s.GetAttachSpec(); bool start_attach; @@ -183,14 +183,14 @@ AttachPath CreateAttachPath(Schedule sch) { attach_ivar = spec->attach_ivar; s = spec->attach_stage; start_attach = false; - CHECK(attach_ivar.defined()); + ICHECK(attach_ivar.defined()); } else if (spec->attach_type == kScanUpdate) { s = spec->attach_stage; start_attach = true; } else { break; } - CHECK(s.defined()); + ICHECK(s.defined()); for (size_t i = s->leaf_iter_vars.size(); i != 0; --i) { IterVar iv = s->leaf_iter_vars[i - 1]; if (!start_attach && iv.same_as(attach_ivar)) { @@ -198,8 +198,8 @@ AttachPath CreateAttachPath(Schedule sch) { } if (start_attach) path.push_back(iv); } - CHECK(start_attach) << "Invalid Schedule: cannot find attach point " << attach_ivar - << " in the schedule of " << s->op; + ICHECK(start_attach) << "Invalid Schedule: cannot find attach point " << attach_ivar + << " in the schedule of " << s->op; } if (!ret.count(stage->op)) { ret.Set(stage->op, path); diff --git a/src/te/schedule/message_passing.cc b/src/te/schedule/message_passing.cc index 0a82673aa4b8..d45f29ebc5b6 100644 --- a/src/te/schedule/message_passing.cc +++ b/src/te/schedule/message_passing.cc @@ -40,9 +40,9 @@ void Update(std::unordered_map* p_state, const IterVar& iv, Rang } else { bool match = is_zero(it->second->min) && analyzer->CanProve(r->extent - it->second->extent == 0); - CHECK(match) << iv << " domain already inferred," - << " cannot prove their extents are the same " << 
it->second->extent << " vs " - << r->extent; + ICHECK(match) << iv << " domain already inferred," + << " cannot prove their extents are the same " << it->second->extent << " vs " + << r->extent; } } @@ -109,10 +109,10 @@ void PassDownDomain(const Stage& stage, std::unordered_map* p_st for (IterVarRelation rel : stage->relations) { if (const SplitNode* r = rel.as()) { if (!state.count(r->parent)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } - CHECK(!state.count(r->inner)); + ICHECK(!state.count(r->inner)); const Range& range_parent = state.at(r->parent); // Tighten iv's extent to min(parent_extent, factor_or_nparts), only if all of the // following conditions are met: @@ -143,7 +143,7 @@ void PassDownDomain(const Stage& stage, std::unordered_map* p_st } } else if (const FuseNode* r = rel.as()) { if (!state.count(r->outer) || !state.count(r->inner)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } const Range& range_outer = state.at(r->outer); @@ -151,7 +151,7 @@ void PassDownDomain(const Stage& stage, std::unordered_map* p_st state[r->fused] = Range::FromMinExtent(0, range_outer->extent * range_inner->extent); } else if (const RebaseNode* r = rel.as()) { if (!state.count(r->parent)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } Update(p_state, r->rebased, Range::FromMinExtent(0, state.at(r->parent)->extent), actx); @@ -164,7 +164,7 @@ void PassDownDomain(const Stage& stage, std::unordered_map* p_st // update the extents of binded threads. for (auto kv : stage->iter_var_attrs) { if (kv.second->bind_thread.defined()) { - CHECK(state.count(kv.first)); + ICHECK(state.count(kv.first)); Update(p_state, kv.second->bind_thread, state.at(kv.first), actx); } } @@ -177,7 +177,7 @@ void PassUpIndex(const Stage& stage, const Map& dom_map, IterVarRelation rel = stage->relations[i - 1]; if (const SplitNode* s = rel.as()) { if (!state.count(s->outer) || !state.count(s->inner)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } PrimExpr outer = state.at(s->outer); @@ -191,7 +191,7 @@ void PassUpIndex(const Stage& stage, const Map& dom_map, } } else if (const FuseNode* s = rel.as()) { if (!state.count(s->fused)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } PrimExpr value = state.at(s->fused); @@ -213,7 +213,7 @@ void PassUpIndex(const Stage& stage, const Map& dom_map, state[s->inner] = cast(s->inner->var.dtype(), state[s->inner]); } else if (const RebaseNode* s = rel.as()) { if (!state.count(s->rebased)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } PrimExpr value = state.at(s->rebased); @@ -237,18 +237,18 @@ void PassDownIndex(const Stage& stage, const Map& dom_map, for (IterVarRelation rel : stage->relations) { if (const SplitNode* s = rel.as()) { if (!state.count(s->parent)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } Range r = dom_map.at(s->inner); - CHECK(is_zero(r->min)); + ICHECK(is_zero(r->min)); PrimExpr parent = state.at(s->parent); PrimExpr factor = r->extent; state[s->outer] = indexdiv(parent, factor); state[s->inner] = indexmod(parent, factor); } else if (const FuseNode* s = rel.as()) { if (!state.count(s->inner) && !state.count(s->outer)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } PrimExpr factor = dom_map.at(s->inner)->extent; @@ -256,17 +256,17 @@ void PassDownIndex(const Stage& stage, const Map& dom_map, PrimExpr inner_min = dom_map.at(s->inner)->min; PrimExpr inner = state.at(s->inner); PrimExpr outer = state.at(s->outer); - CHECK(is_zero(outer_min)); - 
CHECK(is_zero(inner_min)); + ICHECK(is_zero(outer_min)); + ICHECK(is_zero(inner_min)); state[s->fused] = outer * factor + inner; } else if (const RebaseNode* s = rel.as()) { if (!state.count(s->rebased)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } PrimExpr value = state.at(s->parent); PrimExpr parent_min = dom_map.at(s->parent)->min; - CHECK(is_zero(parent_min)); + ICHECK(is_zero(parent_min)); state[s->rebased] = value; } else if (const SingletonNode* s = rel.as()) { state[s->iter] = make_zero(s->iter->var.dtype()); @@ -286,18 +286,18 @@ void PassUpDomain(const SplitNode* s, const std::unordered_map& } PrimExpr factor = dom_map.at(s->inner)->extent; PrimExpr parent_min = dom_map.at(s->parent)->min; - CHECK(outer.defined()); - CHECK(inner.defined()); - CHECK(factor.defined()); + ICHECK(outer.defined()); + ICHECK(inner.defined()); + ICHECK(factor.defined()); *parent = arith::EvalSet(s->outer->var * factor + s->inner->var + parent_min, {{s->outer, outer}, {s->inner, inner}}); } void PassUpDomain(const FuseNode* s, const std::unordered_map& dom_map, const IntSet& fused, IntSet* outer, IntSet* inner) { - CHECK(dom_map.count(s->outer)); - CHECK(dom_map.count(s->inner)); - CHECK(dom_map.count(s->fused)); + ICHECK(dom_map.count(s->outer)); + ICHECK(dom_map.count(s->inner)); + ICHECK(dom_map.count(s->fused)); arith::Analyzer ana; if (fused.MatchRange(dom_map.at(s->fused))) { @@ -342,7 +342,7 @@ void PassUpDomain(const FuseNode* s, const std::unordered_map& d void PassUpDomain(const RebaseNode* s, const std::unordered_map& dom_map, const IntSet& rebased, IntSet* parent) { - CHECK(dom_map.count(s->parent)); + ICHECK(dom_map.count(s->parent)); if (rebased.MatchRange(dom_map.at(s->rebased))) { *parent = IntSet::FromRange(dom_map.at(s->parent)); return; @@ -384,7 +384,7 @@ void PassUpBitMaskOr(const Stage& stage, std::unordered_map* p_sta IterVarRelation rel = stage->relations[i - 1]; if (const SplitNode* s = rel.as()) { if (!state.count(s->inner) && !state.count(s->outer)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } int res = 0; @@ -394,7 +394,7 @@ void PassUpBitMaskOr(const Stage& stage, std::unordered_map* p_sta state[s->parent] = res; } else if (const FuseNode* s = rel.as()) { if (!state.count(s->fused)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } if (!state.count(s->outer)) { @@ -409,7 +409,7 @@ void PassUpBitMaskOr(const Stage& stage, std::unordered_map* p_sta } } else if (const RebaseNode* s = rel.as()) { if (!state.count(s->rebased)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } if (!state.count(s->parent)) { @@ -430,7 +430,7 @@ void PassDownBitMaskOr(const Stage& stage, std::unordered_map* p_s for (IterVarRelation rel : stage->relations) { if (const SplitNode* s = rel.as()) { if (!state.count(s->parent)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } if (!state.count(s->outer)) { @@ -445,7 +445,7 @@ void PassDownBitMaskOr(const Stage& stage, std::unordered_map* p_s } } else if (const FuseNode* s = rel.as()) { if (!state.count(s->outer) && !state.count(s->inner)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } int res = 0; @@ -455,7 +455,7 @@ void PassDownBitMaskOr(const Stage& stage, std::unordered_map* p_s state[s->fused] = res; } else if (const RebaseNode* s = rel.as()) { if (!state.count(s->parent)) { - CHECK(allow_missing); + ICHECK(allow_missing); continue; } if (!state.count(s->rebased)) { @@ -561,7 +561,7 @@ std::vector MakeBoundCheck(const Stage& stage, const Mapop->root_iter_vars()) 
{ if (skip_iter.count(iv) || iv->iter_type == kOpaque) continue; Range dom = dom_map.at(iv); - CHECK(iv->dom.defined()); + ICHECK(iv->dom.defined()); if (!skip_ivar_domain && !IsRangeSame(iv->dom, dom)) { PrimExpr value = value_map.at(iv) - iv->dom->min; IntSet s = analyzer.int_set(value, iset_dmap); diff --git a/src/te/schedule/operation_inline.cc b/src/te/schedule/operation_inline.cc index 01d93c5ec8bd..8eed6e3f10fc 100644 --- a/src/te/schedule/operation_inline.cc +++ b/src/te/schedule/operation_inline.cc @@ -48,9 +48,9 @@ class OperationInliner final : public StmtExprMutator { auto tensor = Downcast(op->producer); if (tensor->op.same_as(operation_)) { - CHECK_EQ(tensor->value_index, 0); + ICHECK_EQ(tensor->value_index, 0); expr = body_; - CHECK_EQ(args_.size(), op->indices.size()); + ICHECK_EQ(args_.size(), op->indices.size()); bool has_side_effect = false; for (size_t i = 0; i < op->indices.size(); ++i) { @@ -81,7 +81,7 @@ class OperationInliner final : public StmtExprMutator { }; Stmt Inline(Stmt stmt, Operation f, Array args, PrimExpr body) { - CHECK_EQ(f->num_outputs(), 1) << "can only inline output single value operation"; + ICHECK_EQ(f->num_outputs(), 1) << "can only inline output single value operation"; Stmt ret = OperationInliner(f, args, body)(std::move(stmt)); if (ret.same_as(stmt)) return ret; return ConvertSSA(ret); diff --git a/src/te/schedule/schedule_dataflow_rewrite.cc b/src/te/schedule/schedule_dataflow_rewrite.cc index 941817a5d954..6aac3b769a47 100644 --- a/src/te/schedule/schedule_dataflow_rewrite.cc +++ b/src/te/schedule/schedule_dataflow_rewrite.cc @@ -163,7 +163,7 @@ Tensor Schedule::cache_read(const Tensor& tensor, const std::string& scope, for (Operation op : readers) { Stage s = operator[](op); Operation repl_op = s->op->ReplaceInputs(s->op, vsub); - CHECK(!repl_op.same_as(s->op)) << "Cannot find " << tensor << " in the inputs of " << s->op; + ICHECK(!repl_op.same_as(s->op)) << "Cannot find " << tensor << " in the inputs of " << s->op; vmap[s->op.output(0)] = repl_op.output(0); rvmap[repl_op.output(0)] = s->op.output(0); s->op = repl_op; @@ -174,7 +174,7 @@ Tensor Schedule::cache_read(const Tensor& tensor, const std::string& scope, size_t pos = FindNodeRef(stages.GetArrayNode(), op_stage); Stage cache_stage = Stage(cache->op); cache_stage.set_scope(scope); - CHECK_LT(pos, stages.size()); + ICHECK_LT(pos, stages.size()); stages.insert(stages.begin() + pos + 1, cache_stage); (*this)->stage_map.Set(cache->op, cache_stage); // Update group @@ -212,7 +212,7 @@ void PrepareAxisMapping(Stage orig_stage, OpType* op, std::unordered_set value_map; for (IterVar iv : orig_stage->leaf_iter_vars) { if (red_axis.count(iv)) continue; - CHECK_EQ(iv->iter_type, kDataPar) << "Can only relayout with in data parallel dimensions"; + ICHECK_EQ(iv->iter_type, kDataPar) << "Can only relayout within data parallel dimensions"; Range dom = dom_map.at(iv); IterVar new_iv = IterVar(dom, iv->var.copy_with_suffix(".c"), iv->iter_type); new_axis.push_back(new_iv); @@ -266,7 +266,7 @@ Array ReplaceOriginalOp(Schedule sch, Stage orig_stage, const std::strin size_t pos = FindNodeRef(stages.GetArrayNode(), orig_stage); Stage cache_stage = Stage(cache_op); cache_stage.set_scope(scope); - CHECK_LT(pos, stages.size()); + ICHECK_LT(pos, stages.size()); stages.insert(stages.begin() + pos, cache_stage); sch->stage_map.Set(cache_op, cache_stage); // Update group @@ -309,14 +309,14 @@ Array CacheWriteWithReLayout(Schedule sch, const Array& tensor_a if (body->IsInstance()) { const tir::ReduceNode*
reduce_body = body.as(); if (first_reduce != nullptr) { - CHECK(ReduceEqual(reduce_body, first_reduce)); + ICHECK(ReduceEqual(reduce_body, first_reduce)); body = tir::Reduce(first_reduce->combiner, first_reduce->source, first_reduce->axis, first_reduce->condition, reduce_body->value_index, reduce_body->init); } else { first_reduce = reduce_body; } } else { - CHECK(first_reduce == nullptr) << "cannot mix reduce and other node in ONE compute bodys"; + ICHECK(first_reduce == nullptr) << "cannot mix reduce and other nodes in ONE compute body"; } body_list.push_back(body); } @@ -355,7 +355,7 @@ Array CacheWriteWithReLayoutTensor(Schedule sch, const Array& te Tensor tensor = tensor_array[0]; Stage orig_stage = sch[tensor->op]; const TensorComputeOpNode* tensor_op = orig_stage->op.as(); - CHECK_EQ(tensor_op->num_outputs(), 1) + ICHECK_EQ(tensor_op->num_outputs(), 1) << "cache write only supports single output tensor_compute_op"; std::unordered_set red_axis; @@ -435,15 +435,15 @@ Array CacheWriteWithReLayoutTensor(Schedule sch, const Array& te Array Schedule::cache_write(const Array& tensor_array, const std::string& scope) { (*this)->InvalidateCache(); - CHECK(tensor_array.size() > 0) << "size of tensor_array must be greater than 0"; + ICHECK(tensor_array.size() > 0) << "size of tensor_array must be greater than 0"; Tensor tensor = tensor_array[0]; Stage orig_stage = operator[](tensor->op); const ComputeOpNode* compute = tensor->op.as(); - CHECK(static_cast(compute->num_outputs()) == tensor_array.size()) + ICHECK(static_cast(compute->num_outputs()) == tensor_array.size()) << "size of input tensor list must be same as number of stage outputs"; for (size_t i = 1; i < tensor_array.size(); i++) { Stage tmp_stage = operator[](tensor_array[i]->op); - CHECK(orig_stage.same_as(tmp_stage)) << "Input tensor list must be generated by ONE computeOp"; + ICHECK(orig_stage.same_as(tmp_stage)) << "Input tensor list must be generated by ONE computeOp"; } return CacheWriteWithReLayout(*this, tensor_array, scope); } @@ -519,11 +519,11 @@ void InjectInline(ScheduleNode* sch) { { // setup args const ComputeOpNode* compute = stage->op.as(); - CHECK(compute) << "can only inline compute op"; + ICHECK(compute) << "can only inline compute op"; for (auto iv : compute->axis) { args.push_back(iv->var); } - CHECK_EQ(compute->body.size(), 1U) << "can only inline compute op with 1 output"; + ICHECK_EQ(compute->body.size(), 1U) << "can only inline compute op with 1 output"; body = compute->body[0]; } for (size_t j = i; j < sch->stages.size(); ++j) { @@ -539,9 +539,9 @@ void InjectInline(ScheduleNode* sch) { const tir::ReduceNode* reduce = new_body[j][0].as(); for (size_t k = 1; k < new_body[j].size(); ++k) { const tir::ReduceNode* reduce_ = new_body[j][k].as(); - CHECK(reduce_); - CHECK(ReduceEqual(reduce_, reduce)) << "The Reduce inputs of ComputeOp should " - << "have the same attribute except value_index"; + ICHECK(reduce_); + ICHECK(ReduceEqual(reduce_, reduce)) << "The Reduce inputs of ComputeOp should " + << "have the same attribute except value_index"; } PrimExpr new_value = Inline(tir::Evaluate(new_body[j][0]), stage->op, args, body) .as() ->value; @@ -549,8 +549,8 @@ void InjectInline(ScheduleNode* sch) { if (!new_value.same_as(new_body[j][0])) { changed[j] = true; const tir::ReduceNode* r = new_value.as(); - CHECK(r != nullptr); - CHECK_EQ(new_body[j].size(), r->source.size()); + ICHECK(r != nullptr); + ICHECK_EQ(new_body[j].size(), r->source.size()); for (size_t k = 0; k < new_body[j].size(); ++k) { auto n = make_object(*r);
n->value_index = static_cast(k); @@ -590,7 +590,7 @@ void InjectInline(ScheduleNode* sch) { if (new_body[i].size()) { // Logics from ReplaceDataFlow const ComputeOpNode* compute = sch->stages[i]->op.as(); - CHECK(compute); + ICHECK(compute); Operation op = s->op; if (changed[i]) { op = ComputeOp(compute->name, compute->tag, compute->attrs, compute->axis, new_body[i]); @@ -604,7 +604,7 @@ void InjectInline(ScheduleNode* sch) { } } else if (hybrid_changed[i]) { const HybridOpNode* hybrid = sch->stages[i]->op.as(); - CHECK(hybrid); + ICHECK(hybrid); Operation op = HybridOp(hybrid->name, hybrid->tag, hybrid->attrs, hybrid->inputs, hybrid->outputs, new_hybrid_body[i]); op = op->ReplaceInputs(op, repl); @@ -647,8 +647,8 @@ void LegalizeInvalidAttach(ScheduleNode* sch) { bool start_attach = false; IterVar attach_ivar = spec->attach_ivar; s = spec->attach_stage; - CHECK(attach_ivar.defined()); - CHECK(s.defined()); + ICHECK(attach_ivar.defined()); + ICHECK(s.defined()); for (size_t i = s->leaf_iter_vars.size(); i != 0; --i) { IterVar iv = s->leaf_iter_vars[i - 1]; @@ -710,14 +710,15 @@ Schedule Schedule::normalize() { Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int factor_axis) { (*this)->InvalidateCache(); using tir::ReduceNode; - CHECK_EQ(axis->iter_type, kCommReduce) << "Can only factor reduction axis"; + ICHECK_EQ(axis->iter_type, kCommReduce) << "Can only factor reduction axis"; Stage reduce_stage = operator[](tensor->op); const ComputeOpNode* compute_op = reduce_stage->op.as(); - CHECK(compute_op) << "Can only factor ComputeOp"; + ICHECK(compute_op) << "Can only factor ComputeOp"; ArrayNode* leaf_vars = reduce_stage->leaf_iter_vars.CopyOnWrite(); { size_t axis_pos = FindNodeRef(leaf_vars, axis); - CHECK_NE(axis_pos, leaf_vars->size()) << "Cannot find IterVar " << axis << " in leaf iter vars"; + ICHECK_NE(axis_pos, leaf_vars->size()) + << "Cannot find IterVar " << axis << " in leaf iter vars"; } // Find touched reduction axis. std::unordered_map touch_map; @@ -728,7 +729,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f std::unordered_set skip_bound_check; // Verify normal axis are not touched. for (IterVar iv : compute_op->axis) { - CHECK(!touch_map.count(iv)) << "Factor axis touches normal axis."; + ICHECK(!touch_map.count(iv)) << "Factor axis touches normal axis."; skip_bound_check.insert(iv); } // get analyzer. @@ -762,14 +763,14 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f // Get the factored op node. const int factor_axis_pos = factor_axis >= 0 ? factor_axis : static_cast(compute_op->axis.size() + 1) + factor_axis; - CHECK_LE(factor_axis_pos, compute_op->axis.size()); + ICHECK_LE(factor_axis_pos, compute_op->axis.size()); auto n = make_object(); n->name = compute_op->name + ".rf"; { // axis replacement. auto iv_node = make_object(); iv_node->dom = dom_map.at(axis); - CHECK(is_zero(iv_node->dom->min)) << "Can only factor reduction domain starting from 0"; + ICHECK(is_zero(iv_node->dom->min)) << "Can only factor reduction domain starting from 0"; iv_node->var = axis->var; iv_node->iter_type = kDataPar; @@ -787,7 +788,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f // predicate generation, copy not touched axis.
int idx = tensor->value_index; const ReduceNode* reduce = compute_op->body[idx].as(); - CHECK(reduce) << "Can only rfactor non-inline reductions"; + ICHECK(reduce) << "Can only rfactor non-inline reductions"; predicates.push_back(reduce->condition); auto fand = [](PrimExpr a, PrimExpr b) { return a && b; }; @@ -799,7 +800,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f if (!touch_map.count(iv)) { n->reduce_axis.push_back(iv); } else { - CHECK(value_map.count(iv)); + ICHECK(value_map.count(iv)); PrimExpr index = value_map.at(iv); vsub[iv->var.get()] = index; } @@ -808,7 +809,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f // Copy touched axis. for (IterVar iv : reduce_stage->leaf_iter_vars) { if (touch_map.count(iv) && !iv.same_as(axis)) { - CHECK_EQ(iv->iter_type, kCommReduce); + ICHECK_EQ(iv->iter_type, kCommReduce); auto ncpy = make_object(*iv.operator->()); ncpy->dom = dom_map.at(iv); n->reduce_axis.push_back(IterVar(ncpy)); @@ -848,7 +849,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f size_t stage_pos = FindNodeRef(stages.GetArrayNode(), reduce_stage); Stage factor_stage = Stage(factor_op); factor_stage->relations = rels; - CHECK_LT(stage_pos, stages.size()); + ICHECK_LT(stage_pos, stages.size()); stages.insert(stages.begin() + stage_pos, factor_stage); (*this)->stage_map.Set(factor_op, factor_stage); factor_stage->group = reduce_stage->group; @@ -880,7 +881,7 @@ Array Schedule::rfactor(const Tensor& tensor, const IterVar& axis, int f std::unordered_map init_vsub; for (const auto& init : reduce->init) { if (init->IsInstance()) { - CHECK_EQ(compute_op->axis.size(), idx_size) + ICHECK_EQ(compute_op->axis.size(), idx_size) << "'init' should have the number of dimensions as output when using with " "rfactor"; for (int idx = 0; idx < idx_size; idx++) { diff --git a/src/te/schedule/schedule_lang.cc b/src/te/schedule/schedule_lang.cc index a8257c07a473..8964c1013a53 100644 --- a/src/te/schedule/schedule_lang.cc +++ b/src/te/schedule/schedule_lang.cc @@ -58,8 +58,8 @@ size_t FindLeafVar(ArrayNode* all_vars, ArrayNode* leaf_vars, const IterVar& v) DataType MatchDataType(std::vector dtypes) { int max_bits = -1; for (const auto& dtype : dtypes) { - CHECK(dtype.is_int()); - CHECK(dtype.is_scalar()); + ICHECK(dtype.is_int()); + ICHECK(dtype.is_scalar()); max_bits = std::max(max_bits, dtype.bits()); } return DataType::Int(max_bits); @@ -68,8 +68,8 @@ DataType MatchDataType(std::vector dtypes) { void SplitHelper(StageNode* self, IterVar parent, PrimExpr factor, PrimExpr nparts, IterVar* p_outer, IterVar* p_inner) { // Check if split is valid. - CHECK(parent->iter_type == kDataPar || parent->iter_type == kCommReduce || - parent->iter_type == kOrdered) + ICHECK(parent->iter_type == kDataPar || parent->iter_type == kCommReduce || + parent->iter_type == kOrdered) << "Cannot split on " << IterVarType2String(parent->iter_type); IterVar outer = IterVar(Range(), parent->var.copy_with_suffix(".outer"), parent->iter_type); IterVar inner = IterVar(Range(), parent->var.copy_with_suffix(".inner"), parent->iter_type); @@ -127,7 +127,7 @@ Stage& Stage::set_scope(std::string scope) { // NOLINT(*) } Stage& Stage::compute_at(Stage parent, IterVar scope) { // NOLINT(*) - CHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; + ICHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; // Group constraint checking. 
Stage group = (*this)->group; if (group.defined()) { @@ -135,7 +135,7 @@ Stage& Stage::compute_at(Stage parent, IterVar scope) { // NOLINT(*) while (pg.defined() && !pg.same_as(group)) { pg = pg->group; } - CHECK(pg.same_as(group)) << "Can only assign compute_at to stages within the same group"; + ICHECK(pg.same_as(group)) << "Can only assign compute_at to stages within the same group"; } (*this)->attach_type = kScope; @@ -148,28 +148,28 @@ Stage& Stage::compute_at(Stage parent, IterVar scope) { // NOLINT(*) break; } } - CHECK(found) << "Cannot find the axis " << scope << " in parent's leaf_iter_vars" - << " parent=" << parent; + ICHECK(found) << "Cannot find the axis " << scope << " in parent's leaf_iter_vars" + << " parent=" << parent; return *this; } Stage& Stage::compute_inline() { // NOLINT(*) - CHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; + ICHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; (*this)->attach_type = kInline; return *this; } Stage& Stage::compute_root() { // NOLINT(*) - CHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; + ICHECK_NE((*this)->attach_type, kScanUpdate) << "Cannot specify compute_at for scan updates"; (*this)->attach_type = kGroupRoot; return *this; } Stage& Stage::bind(IterVar ivar, IterVar thread_ivar) { // NOLINT(*) StageNode* self = operator->(); - CHECK(ivar->iter_type == kDataPar || ivar->iter_type == kCommReduce) + ICHECK(ivar->iter_type == kDataPar || ivar->iter_type == kCommReduce) << "Cannot bind " << IterVarType2String(ivar->iter_type) << " to thread"; - CHECK(thread_ivar->iter_type == kThreadIndex) + ICHECK(thread_ivar->iter_type == kThreadIndex) << "Cannot rebase by " << IterVarType2String(ivar->iter_type) << ", only thread axis is allowed so far"; ArrayNode* all_vars = self->all_iter_vars.CopyOnWrite(); @@ -193,9 +193,9 @@ Stage& Stage::bind(IterVar ivar, IterVar thread_ivar) { // NOLINT(*) Stage& Stage::env_threads(Array threads) { StageNode* self = operator->(); - CHECK(self->op.defined() && self->op.as()) + ICHECK(self->op.defined() && self->op.as()) << "env_threads is only valid for composite ops such as ScanOp"; - CHECK_EQ(self->env_threads.size(), 0U) << "Already set env_threads"; + ICHECK_EQ(self->env_threads.size(), 0U) << "Already set env_threads"; Array& leaf_vars = self->leaf_iter_vars; Array& all_vars = self->all_iter_vars; std::vector temp; @@ -228,11 +228,11 @@ Stage& Stage::split_by_nparts(IterVar parent, PrimExpr nparts, IterVar* p_outer, Stage& Stage::fuse(IterVar outer, IterVar inner, IterVar* p_target) { // NOLINT(*) StageNode* self = operator->(); - CHECK(outer->iter_type == kDataPar || outer->iter_type == kCommReduce || - outer->iter_type == kOrdered) + ICHECK(outer->iter_type == kDataPar || outer->iter_type == kCommReduce || + outer->iter_type == kOrdered) << "Cannot fuse " << IterVarType2String(outer->iter_type); - CHECK(inner->iter_type == kDataPar || inner->iter_type == kCommReduce || - inner->iter_type == kOrdered) + ICHECK(inner->iter_type == kDataPar || inner->iter_type == kCommReduce || + inner->iter_type == kOrdered) << "Cannot fuse " << IterVarType2String(inner->iter_type); IterVarType iter_type = outer->iter_type; @@ -251,7 +251,7 @@ Stage& Stage::fuse(IterVar outer, IterVar inner, IterVar* p_target) { // NOLINT std::swap(outer, inner); std::swap(pos_inner, pos_outer); } - CHECK_EQ(pos_inner, pos_outer + 1) + ICHECK_EQ(pos_inner, pos_outer + 1) << "Can only fuse iterations that are 
consecutive between each other"; self->relations.push_back(Fuse(outer, inner, fused)); all_vars.push_back(fused); @@ -288,11 +288,11 @@ Stage& Stage::reorder(const Array& order) { // NOLINT(*) std::unordered_set seen_var; StageNode* self = operator->(); for (IterVar iv : order) { - CHECK(iv->iter_type == kDataPar || iv->iter_type == kCommReduce || - iv->iter_type == kThreadIndex) + ICHECK(iv->iter_type == kDataPar || iv->iter_type == kCommReduce || + iv->iter_type == kThreadIndex) << "Cannot reorder IterVar(" << IterVarType2String(iv->iter_type) << ")"; - CHECK_EQ(seen_var.count(iv), 0) << "Same axis can not appear more than once " << iv; + ICHECK_EQ(seen_var.count(iv), 0) << "Same axis can not appear more than once " << iv; seen_var.insert(iv); } ArrayNode* all_vars = self->all_iter_vars.CopyOnWrite(); @@ -345,9 +345,9 @@ inline void SetAttrIterType(StageNode* self, IterVar var, IterVarType iter_type) } Stage& Stage::vectorize(IterVar var) { // NOLINT(*) - CHECK(var->iter_type == kDataPar || var->iter_type == kOpaque || var->iter_type == kUnrolled || - var->iter_type == kVectorized || var->iter_type == kTensorized || - var->iter_type == kParallelized) + ICHECK(var->iter_type == kDataPar || var->iter_type == kOpaque || var->iter_type == kUnrolled || + var->iter_type == kVectorized || var->iter_type == kTensorized || + var->iter_type == kParallelized) << "Cannot vectorize on " << IterVarType2String(var->iter_type); SetAttrIterType(operator->(), var, kVectorized); return *this; @@ -418,7 +418,7 @@ Stage& Stage::storage_align(IterVar axis, int factor, int offset) { Stage& Stage::double_buffer() { StageNode* self = operator->(); - CHECK(!self->is_output) << "Cannot apply double buffer on output"; + ICHECK(!self->is_output) << "Cannot apply double buffer on output"; self->double_buffer = true; return *this; } @@ -451,23 +451,23 @@ Schedule Schedule::copy() const { } for (Stage s : n->stages) { if (s->attach_stage.defined()) { - CHECK(smap.find(s->attach_stage) != smap.end()) + ICHECK(smap.find(s->attach_stage) != smap.end()) << s->attach_stage << " not found in " << (*this); s->attach_stage = smap.at(s->attach_stage); } if (s->group.defined()) { - CHECK(smap.find(s->group) != smap.end()) << s->group << " not found in " << (*this); + ICHECK(smap.find(s->group) != smap.end()) << s->group << " not found in " << (*this); s->group = smap.at(s->group); } } for (Stage s : n->groups) { if (s->attach_stage.defined()) { - CHECK(smap.find(s->attach_stage) != smap.end()) + ICHECK(smap.find(s->attach_stage) != smap.end()) << s->attach_stage << " not found in " << (*this); s->attach_stage = smap.at(s->attach_stage); } if (s->group.defined()) { - CHECK(smap.find(s->group) != smap.end()) << s->group << " not found in " << (*this); + ICHECK(smap.find(s->group) != smap.end()) << s->group << " not found in " << (*this); s->group = smap.at(s->group); } } @@ -476,7 +476,7 @@ Schedule Schedule::copy() const { Stage Schedule::operator[](const Operation& op) { auto it = (*this)->stage_map.find(op); - CHECK(it != (*this)->stage_map.end()) + ICHECK(it != (*this)->stage_map.end()) << "Cannot find Stage for operator " << op << " in the schedule"; return (*it).second; } @@ -504,7 +504,7 @@ Array RemapTensor(ScheduleNode* self, const Array& arr) { Array ret; for (Tensor t : arr) { if (!op2stage_cache.count(t->op.get())) { - CHECK(self->stage_map.count(t->op)) << "Given tensor is not in the schedule plan"; + ICHECK(self->stage_map.count(t->op)) << "Given tensor is not in the schedule plan"; t = 
self->stage_map[t->op]->op.output(t->value_index); } ret.push_back(t); @@ -534,7 +534,7 @@ Stage Schedule::create_group(const Array& outputs, const Array& for (size_t i = 0; i < ops.size(); ++i) { Operation op = ops[i]; auto it = op2stage_cache.find(op.get()); - CHECK(it != op2stage_cache.end()); + ICHECK(it != op2stage_cache.end()); Stage op_group = it->second->group; if (i == 0) { parent_group = op_group; @@ -575,7 +575,7 @@ Stage Schedule::create_group(const Array& outputs, const Array& // Verification and remapping of the subgroups. for (auto& kv : counter) { if (kv.first.same_as(parent_group)) continue; - CHECK_EQ(kv.first->num_child_stages, kv.second.count) + ICHECK_EQ(kv.first->num_child_stages, kv.second.count) << "Trying to group regions that intersect with an already existing group"; if (kv.first->group.same_as(parent_group)) { Stage s = kv.first; @@ -589,7 +589,7 @@ Stage Schedule::create_group(const Array& outputs, const Array& // Remap the group of op stages. for (Operation op : ops) { auto it = op2stage_cache.find(op.get()); - CHECK(it != op2stage_cache.end()); + ICHECK(it != op2stage_cache.end()); Stage s = it->second; if (s->group.same_as(parent_group)) { s->group = gstage; @@ -602,7 +602,7 @@ Stage Schedule::create_group(const Array& outputs, const Array& // Correct the attach to keep everything in group. for (Operation op : ops) { auto it = op2stage_cache.find(op.get()); - CHECK(it != op2stage_cache.end()); + ICHECK(it != op2stage_cache.end()); Stage s = it->second; if (s->attach_type == kScope) { Stage cg = LeastCommonAncestor(s->attach_stage->group, gstage); @@ -628,7 +628,7 @@ void ScheduleNode::InitCache() { op2stage_cache_[s->op.get()] = s; } } - CHECK_EQ(op2stage_cache_.size(), stages.size()); + ICHECK_EQ(op2stage_cache_.size(), stages.size()); } bool ScheduleNode::Contain(const Operation& op) const { @@ -667,7 +667,7 @@ Schedule::Schedule(Array ops) { for (size_t i = 0; i < scan->update.size(); ++i) { Stage s = n->stage_map[scan->update[i]->op]; - CHECK(scan_group.same_as(s->group)); + ICHECK(scan_group.same_as(s->group)); } } } @@ -726,8 +726,8 @@ void SpecializedCondition::EnterWithScope() { void SpecializedCondition::ExitWithScope() { TVMSpecializationThreadLocalEntry* entry = TVMSpecializationThreadLocalStore::Get(); - CHECK(!entry->condition_stack.empty()); - CHECK(entry->condition_stack.top().same_as(*this)); + ICHECK(!entry->condition_stack.empty()); + ICHECK(entry->condition_stack.top().same_as(*this)); entry->condition_stack.pop(); } diff --git a/src/te/schedule/schedule_ops.cc b/src/te/schedule/schedule_ops.cc index a16d9bb73000..355e3c39494b 100644 --- a/src/te/schedule/schedule_ops.cc +++ b/src/te/schedule/schedule_ops.cc @@ -69,13 +69,13 @@ class InjectAttach : public StmtMutator { debug_keep_trivial_loop_(debug_keep_trivial_loop) {} Stmt VisitStmt(const Stmt& input_stmt) final { - CHECK(input_stmt.defined()); + ICHECK(input_stmt.defined()); auto stmt = StmtMutator::VisitStmt(input_stmt); const AttrStmtNode* op = stmt.as(); if (op != nullptr && op->attr_key == tir::attr::loop_scope) { if (attach_spec_->attach_type == kScope && op->node == attach_spec_->attach_ivar) { - CHECK(!found_attach) << "Find IterVar" << attach_spec_->attach_ivar - << " in multiple places in the IR"; + ICHECK(!found_attach) << "Find IterVar" << attach_spec_->attach_ivar + << " in multiple places in the IR"; found_attach = true; stmt = AttrStmt(op->node, op->attr_key, op->value, MakePipeline(stage_, dom_map_, op->body, debug_keep_trivial_loop_)); @@ -111,7 +111,7 @@ class InjectScanStep
: public StmtMutator { debug_keep_trivial_loop_(debug_keep_trivial_loop) {} Stmt VisitStmt(const Stmt& input_stmt) final { - CHECK(input_stmt.defined()); + ICHECK(input_stmt.defined()); auto stmt = StmtMutator::VisitStmt(input_stmt); // update const AttrStmtNode* op = stmt.as(); @@ -160,14 +160,14 @@ class SchedulePostProc : public StmtExprMutator { return this->VisitStmt(op->body); } else if (op->attr_key == tir::attr::scan_update_scope) { const ScanOpNode* scan = op->node.as(); - CHECK(scan); + ICHECK(scan); var_value_[scan->scan_axis->var.get()] = op->value; return this->VisitStmt(op->body); } else if (op->attr_key == tir::attr::thread_extent) { // delete duplicated thread extent attr auto it = thread_extent_scope_.find(op->node.get()); if (it != thread_extent_scope_.end()) { - CHECK(is_zero(analyzer_.Simplify(it->second - op->value))); + ICHECK(is_zero(analyzer_.Simplify(it->second - op->value))); return this->VisitStmt(op->body); } else { thread_extent_scope_[op->node.get()] = op->value; @@ -243,7 +243,7 @@ class SchedulePostProc : public StmtExprMutator { PrimExpr VisitExpr_(const ProducerLoadNode* op) final { PrimExpr expr = StmtExprMutator::VisitExpr_(op); op = expr.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); auto key = Downcast(op->producer); auto it = replace_buffer_.find(key); @@ -271,7 +271,7 @@ class SchedulePostProc : public StmtExprMutator { if (kv.second->bind_thread.defined()) { const Var& from = kv.first->var; const Var& to = kv.second->bind_thread->var; - CHECK(!var_value_.count(from.get())); + ICHECK(!var_value_.count(from.get())); var_value_[from.get()] = to; } } @@ -325,7 +325,8 @@ Stmt ScheduleOps(Schedule sch, Map dom_map_, bool debug_keep_tri if (!scan) continue; for (Tensor t : scan->init) { if (scan_init.count(t->op)) { - CHECK(scan_init.at(t->op).same_as(s->op)) << "Scan init tensor can only belong to one scan"; + ICHECK(scan_init.at(t->op).same_as(s->op)) + << "Scan init tensor can only belong to one scan"; } else { scan_init[t->op] = s->op; } @@ -333,44 +334,44 @@ Stmt ScheduleOps(Schedule sch, Map dom_map_, bool debug_keep_tri } // verify correctness of group. for (Stage g : sch->groups) { - CHECK(!g->op.defined()); - CHECK_EQ(g->leaf_iter_vars.size(), 0U); + ICHECK(!g->op.defined()); + ICHECK_EQ(g->leaf_iter_vars.size(), 0U); } // reverse the post DFS order. for (size_t i = sch->stages.size(); i != 0; --i) { Stage s = sch->stages[i - 1]; - CHECK_NE(s->attach_type, kInline) << "call schedule.normalize before scheduleops"; - CHECK(s->op.defined()); + ICHECK_NE(s->attach_type, kInline) << "call schedule.normalize before scheduleops"; + ICHECK(s->op.defined()); // no need to specify place holder op. if (s->op.as()) continue; // Remove grouping sugar, get the real attach spec. 
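// For orientation before the dispatch below: each arm of ScheduleOps's attach
// handling ICHECKs the invariants it needs (body defined, no group on a group
// root, attachment point found). A standalone sketch of that control flow with
// plain enums; the names here are illustrative, not TVM's:
#include <cassert>
#include <string>

enum AttachType { kGroupRoot, kScope, kScanUpdate, kInlinedAlready };

std::string BuildBody(AttachType attach, bool body_defined, bool has_group) {
  switch (attach) {
    case kInlinedAlready:
      return "unchanged";       // nothing to inject
    case kGroupRoot:
      assert(!has_group);       // mirrors ICHECK(!s->group.defined())
      return "pipeline(body)";  // MakePipeline wraps the accumulated body
    case kScanUpdate:
    case kScope:
      assert(body_defined);     // mirrors ICHECK(body.defined())
      return "inject(body)";    // InjectScanStep / InjectAttach mutate the body
  }
  return "";
}

int main() { return BuildBody(kGroupRoot, true, false) == "pipeline(body)" ? 0 : 1; }
// The real dispatch, on the actual Stage, follows: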
Stage attach_spec = s.GetAttachSpec(); if (scan_init.count(s->op)) { - CHECK(body.defined()); + ICHECK(body.defined()); InjectScanStep mu(s, scan_init.at(s->op), dom_map, true, debug_keep_trivial_loop); body = mu(std::move(body)); - CHECK(mu.found_attach) << "did not find attachment point for scan.init"; + ICHECK(mu.found_attach) << "did not find attachment point for scan.init"; } else if (attach_spec->attach_type == kScanUpdate) { // Handle scan update - CHECK(body.defined()); + ICHECK(body.defined()); InjectScanStep mu(s, attach_spec->attach_stage->op, dom_map, false, debug_keep_trivial_loop); body = mu(std::move(body)); - CHECK(mu.found_attach) << "did not find attachment point for scan.update"; + ICHECK(mu.found_attach) << "did not find attachment point for scan.update"; } else if (attach_spec->attach_type == kInlinedAlready) { // do nothing } else if (attach_spec->attach_type == kGroupRoot) { - CHECK(!s->group.defined()); + ICHECK(!s->group.defined()); body = MakePipeline(s, dom_map, body, debug_keep_trivial_loop); } else { - CHECK_EQ(attach_spec->attach_type, kScope); - CHECK(body.defined()); + ICHECK_EQ(attach_spec->attach_type, kScope); + ICHECK(body.defined()); InjectAttach mutator(s, attach_spec, dom_map, debug_keep_trivial_loop); body = mutator(std::move(body)); - CHECK(mutator.found_attach) << "did not find attachment point for " << s << " in " - << attach_spec->attach_stage->op << " x " - << attach_spec->attach_ivar << ", body:\n" - << body; + ICHECK(mutator.found_attach) + << "did not find attachment point for " << s << " in " << attach_spec->attach_stage->op + << " x " << attach_spec->attach_ivar << ", body:\n" + << body; } } SchedulePostProc post_proc; diff --git a/src/te/schedule/schedule_postproc_rewrite_for_tensor_core.cc b/src/te/schedule/schedule_postproc_rewrite_for_tensor_core.cc index 7c4a3c7f6ebd..f81d72e0fe02 100644 --- a/src/te/schedule/schedule_postproc_rewrite_for_tensor_core.cc +++ b/src/te/schedule/schedule_postproc_rewrite_for_tensor_core.cc @@ -415,7 +415,7 @@ class BufferAnalyser : public StmtExprVisitor { } else if (op->attr_key == tir::attr::buffer_dim_align) { te::Tensor tensor = Downcast(op->node); const CallNode* tuple = op->value.as(); - CHECK(tuple && tuple->op.same_as(builtin::tvm_tuple())); + ICHECK(tuple && tuple->op.same_as(builtin::tvm_tuple())); auto& vinfo = dim_align_[tensor]; size_t dim = tuple->args[0].as()->value; if (dim >= vinfo.size()) { @@ -433,9 +433,9 @@ class BufferAnalyser : public StmtExprVisitor { StmtExprVisitor::VisitStmt_(op); auto key = Downcast(op->producer); auto it = buf_map_.find(key); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key->GetNameHint(); + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key->GetNameHint(); const BufferInfo& bi = it->second; - CHECK(!bi.released) << "Read a buffer that is already out of scope"; + ICHECK(!bi.released) << "Read a buffer that is already out of scope"; if (matrix_abc_.count(key->GetNameHint())) { if (bi.shape.size() < 2) { @@ -535,9 +535,9 @@ class BufferAnalyser : public StmtExprVisitor { auto tensor = Downcast(op->producer); auto it = buf_map_.find(tensor); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << tensor->GetNameHint(); + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << tensor->GetNameHint(); const BufferInfo& bi = it->second; - CHECK(!bi.released) << "Read a buffer that is already out of scope"; + ICHECK(!bi.released) << "Read a buffer that is already out of scope"; if 
(matrix_abc_.count(tensor->op->name)) { if (bi.shape.size() < 2) { @@ -591,7 +591,7 @@ class BufferAnalyser : public StmtExprVisitor { void VisitStmt_(const ProducerRealizeNode* op) final { auto key = Downcast(op->producer); if (buf_map_.count(key)) { - CHECK(buf_map_.at(key).external); + ICHECK(buf_map_.at(key).external); this->VisitStmt(op->body); } else { // create a buffer entry @@ -678,7 +678,7 @@ class BufferAnalyser : public StmtExprVisitor { inline Array RelIndex(Array args) const { if (bounds.size() != 0) { Array index; - CHECK_EQ(bounds.size(), args.size()); + ICHECK_EQ(bounds.size(), args.size()); for (size_t i = 0; i < bounds.size(); ++i) { index.push_back(args[i] - bounds[i]->min); } @@ -797,7 +797,7 @@ class TensorCoreIRMutator : public StmtExprMutator { for (size_t i = 0; i < op->bounds.size() - 2; ++i) { new_bounds.push_back(op->bounds[i]); } - CHECK_GE(op->bounds.size(), 2) << "Less than 2 dimensions for matrix " << key->GetNameHint(); + ICHECK_GE(op->bounds.size(), 2) << "Less than 2 dimensions for matrix " << key->GetNameHint(); new_bounds.push_back( Range::FromMinExtent(op->bounds[op->bounds.size() - 2]->min, new_extents[0])); new_bounds.push_back( @@ -818,7 +818,7 @@ class TensorCoreIRMutator : public StmtExprMutator { } auto it = matrix_abc_.find(simplify_name(node->name)); - CHECK(it != matrix_abc_.end()) << "Cannot find matrix info for " << node->name; + ICHECK(it != matrix_abc_.end()) << "Cannot find matrix info for " << node->name; auto matrix_abc = tvm::tir::StringImm("wmma." + it->second); Stmt body = this->VisitStmt(op->body); return AttrStmt(op->node, op->attr_key, matrix_abc, body); @@ -887,12 +887,12 @@ class TensorCoreIRMutator : public StmtExprMutator { } const ProducerLoadNode* value = op->value.as(); - CHECK(value != nullptr) << "Can only load fragment from a buffer"; + ICHECK(value != nullptr) << "Can only load fragment from a buffer"; auto it = strides_.find(value->producer->GetNameHint()); - CHECK(it != strides_.end()) << "Cannot find stride for " << value->producer->GetNameHint(); + ICHECK(it != strides_.end()) << "Cannot find stride for " << value->producer->GetNameHint(); auto strides = it->second; - CHECK_GE(strides.size(), 2); + ICHECK_GE(strides.size(), 2); PrimExpr stride = strides[strides.size() - 2]; // thread index unification inside a warp @@ -905,7 +905,7 @@ class TensorCoreIRMutator : public StmtExprMutator { auto pload = dst.as(); PrimExpr matrix_major; auto iter2 = matrix_major_.find(simplify_name(pload->producer->GetNameHint())); - CHECK(iter2 != matrix_major_.end()) + ICHECK(iter2 != matrix_major_.end()) << "Can not determine matrix major for " << pload->producer->GetNameHint(); if (iter2->second == "col_major") { matrix_major = StringImm("col_major"); @@ -928,9 +928,9 @@ class TensorCoreIRMutator : public StmtExprMutator { auto it3 = frag_store_.find(op); if (it3 != frag_store_.end()) { auto it = strides_.find(op->producer->GetNameHint()); - CHECK(it != strides_.end()) << "Cannot find stride for " << op->producer->GetNameHint(); + ICHECK(it != strides_.end()) << "Cannot find stride for " << op->producer->GetNameHint(); auto strides = it->second; - CHECK_GE(strides.size(), 2); + ICHECK_GE(strides.size(), 2); PrimExpr stride = strides[strides.size() - 2]; PrimExpr dst = it3->second; @@ -978,7 +978,7 @@ class TensorCoreIRMutator : public StmtExprMutator { Array get_tile_size_(const std::string& name) { auto it = matrix_abc_.find(name); auto it2 = matrix_major_.find(name); - CHECK(it != matrix_abc_.end() && it2 != matrix_major_.end()) + 
ICHECK(it != matrix_abc_.end() && it2 != matrix_major_.end()) << "Cannot find matrix info for " << name; PrimExpr size0 = make_const(DataType::Int(32), 16); PrimExpr size1 = make_const(DataType::Int(32), 16); @@ -1011,13 +1011,13 @@ class TensorCoreIRMutator : public StmtExprMutator { const std::function& call_back) { auto tensor = Downcast(pload->producer); auto it = bounds_.find(tensor); - CHECK(it != bounds_.end()); + ICHECK(it != bounds_.end()); Array min_bound; for (auto i : it->second) { min_bound.push_back(i->min); } - CHECK_GE(it->second.size(), 2); + ICHECK_GE(it->second.size(), 2); Array shape; for (size_t i = 0; i < it->second.size() - 2; ++i) { shape.push_back(it->second[i]->extent); @@ -1037,13 +1037,13 @@ class TensorCoreIRMutator : public StmtExprMutator { strides.push_back(make_const(DataType::Int(32), 1)); PrimExpr elem_offset = IntImm(DataType::Int(32), 0); - CHECK_EQ(pload->indices.size(), min_bound.size()); + ICHECK_EQ(pload->indices.size(), min_bound.size()); for (size_t i = 0; i < min_bound.size(); i++) { elem_offset = Add(elem_offset, Mul(strides[i], Sub(pload->indices[i], min_bound[i]))); } auto it2 = matrix_abc_.find(simplify_name(tensor->op->name)); - CHECK(it2 != matrix_abc_.end()) << "Cannot find matrix info for " << tensor->op->name; + ICHECK(it2 != matrix_abc_.end()) << "Cannot find matrix info for " << tensor->op->name; buffer_node->data = Var(tensor->op->name, DataType::Handle()); buffer_node->name = tensor->op->name; buffer_node->scope = "wmma." + it2->second; diff --git a/src/te/schedule/schedule_postproc_to_primfunc.cc b/src/te/schedule/schedule_postproc_to_primfunc.cc index a86ad76b0eb9..1710a91c6985 100644 --- a/src/te/schedule/schedule_postproc_to_primfunc.cc +++ b/src/te/schedule/schedule_postproc_to_primfunc.cc @@ -128,7 +128,7 @@ class TensorToBufferMapper : public StmtExprMutator { Buffer GetBuffer(const Tensor& tensor, bool allow_alloc = false) { auto it = buffer_map_.find(tensor); if (it != buffer_map_.end()) return it->second; - CHECK(allow_alloc) << "Cannot find the Realization point of tensor " << tensor; + ICHECK(allow_alloc) << "Cannot find the Realization point of tensor " << tensor; auto buffer = CreateBufferFor(tensor); buffer_map_[tensor] = buffer; @@ -156,7 +156,7 @@ PrimFunc SchedulePostProcToPrimFunc(Array arg_list, Stmt body, params.push_back(GetRef(n)); } else if (auto* n = var.as()) { te::Tensor tensor = GetRef(n); - CHECK(!extern_buffer.count(tensor)); + ICHECK(!extern_buffer.count(tensor)); tir::Buffer buffer = CreateBufferFor(tensor); tir::Var bptr(buffer->name, DataType::Handle()); diff --git a/src/te/tensor.cc b/src/te/tensor.cc index e66b9632d8a2..18d4947cdddc 100644 --- a/src/te/tensor.cc +++ b/src/te/tensor.cc @@ -46,8 +46,8 @@ PrimExpr Tensor::operator()(Array indices) const { PrimExpr Tensor::operator()(Array indices) const { if (ndim() != 0) { - CHECK_EQ(ndim(), indices.size()) << "Tensor dimension mismatch in read" - << "ndim = " << ndim() << ", indices.size=" << indices.size(); + ICHECK_EQ(ndim(), indices.size()) << "Tensor dimension mismatch in read" + << "ndim = " << ndim() << ", indices.size=" << indices.size(); } return ProducerLoad((*this), indices); diff --git a/src/tir/analysis/verify_gpu_code.cc b/src/tir/analysis/verify_gpu_code.cc index 5ef755a1b5a1..afd3c7add605 100644 --- a/src/tir/analysis/verify_gpu_code.cc +++ b/src/tir/analysis/verify_gpu_code.cc @@ -94,7 +94,7 @@ class GPUCodeVerifier : public StmtExprVisitor { Var var = op->node.as()->var; const auto* extent = op->value.as(); - CHECK(extent); + 
ICHECK(extent); std::string name = var.get()->name_hint; // record the number of threads in a block @@ -167,7 +167,7 @@ class GPUCodeVerifier : public StmtExprVisitor { void VisitStmt_(const ForNode* op) { if (op->loop_var->name_hint == "vthread.s") { const auto* extent = op->extent.as(); - CHECK(extent); + ICHECK(extent); size_t num_vthread = static_cast(extent->value); if (num_vthread > max_vthread_) { diff --git a/src/tir/analysis/verify_memory.cc b/src/tir/analysis/verify_memory.cc index 64097e1d343a..905384f29908 100644 --- a/src/tir/analysis/verify_memory.cc +++ b/src/tir/analysis/verify_memory.cc @@ -170,7 +170,7 @@ class MemoryAccessVerifier final : protected StmtExprVisitor { /// Interface of VerifyMemory pass std::vector VerifyMemory_(const PrimFunc& func) { auto target = func->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "LowerWarpMemory: Require the target attribute"; + ICHECK(target.defined()) << "LowerWarpMemory: Require the target attribute"; if (func->GetAttr(tvm::attr::kCallingConv, Integer(CallingConv::kDefault)) == CallingConv::kDefault) { diff --git a/src/tir/analysis/verify_ssa.cc b/src/tir/analysis/verify_ssa.cc index 834ad09cb61a..d7ccb363c16e 100644 --- a/src/tir/analysis/verify_ssa.cc +++ b/src/tir/analysis/verify_ssa.cc @@ -148,7 +148,7 @@ Pass VerifySSA() { for (auto kv : mod->functions) { if (auto* n = kv.second.as()) { auto func = GetRef(n); - CHECK(VerifySSA(func)) << "RuntimeError: IR is not in SSA form" << func; + ICHECK(VerifySSA(func)) << "RuntimeError: IR is not in SSA form" << func; } } return mod; diff --git a/src/tir/ir/buffer.cc b/src/tir/ir/buffer.cc index d33f2ddf698a..08b2224e9912 100644 --- a/src/tir/ir/buffer.cc +++ b/src/tir/ir/buffer.cc @@ -244,10 +244,10 @@ inline PrimExpr ElemOffset(const BufferNode* n, Array index) { // Scalar case if (n->shape.size() == 0 && index.size() == 1) { auto is_int = index[0].as(); - CHECK(is_int && is_int->value == 0); + ICHECK(is_int && is_int->value == 0); base = base + index[0]; } else { - CHECK_EQ(n->shape.size(), index.size()); + ICHECK_EQ(n->shape.size(), index.size()); if (index.size() > 0) { PrimExpr offset = index[0]; for (size_t i = 1; i < index.size(); ++i) { @@ -257,7 +257,7 @@ inline PrimExpr ElemOffset(const BufferNode* n, Array index) { } } } else { - CHECK_EQ(n->strides.size(), index.size()); + ICHECK_EQ(n->strides.size(), index.size()); if (is_zero(base)) { base = MergeMulMod(&ana, index[0] * n->strides[0]); } else { @@ -285,7 +285,7 @@ inline PrimExpr BufferOffset(const BufferNode* n, Array index, DataTyp PrimExpr Buffer::vload(Array begin, DataType dtype) const { // specially handle bool, stored as DataType::Int(8) const BufferNode* n = operator->(); - CHECK(dtype.element_of() == n->dtype.element_of() && dtype.lanes() % n->dtype.lanes() == 0) + ICHECK(dtype.element_of() == n->dtype.element_of() && dtype.lanes() % n->dtype.lanes() == 0) << "Cannot load " << dtype << " from buffer of " << n->dtype; if (dtype == DataType::Bool()) { return tir::Cast(DataType::Bool(), @@ -300,7 +300,7 @@ Stmt Buffer::vstore(Array begin, PrimExpr value) const { // specially handle bool, stored as DataType::Int(8) const BufferNode* n = operator->(); DataType dtype = value.dtype(); - CHECK(dtype.element_of() == n->dtype.element_of() && dtype.lanes() % n->dtype.lanes() == 0) + ICHECK(dtype.element_of() == n->dtype.element_of() && dtype.lanes() % n->dtype.lanes() == 0) << "Cannot store " << dtype << " to buffer of " << n->dtype; if (value.dtype() == DataType::Bool()) { return tir::Store(n->data, 
tir::Cast(DataType::Int(8), value), @@ -383,7 +383,7 @@ PrimExpr Buffer::access_ptr(int access_mask, DataType ptr_type, int content_lane Buffer::Buffer(Var data, DataType dtype, Array shape, Array strides, PrimExpr elem_offset, String name, String scope, int data_alignment, int offset_factor, BufferType buffer_type) { - CHECK(IsPointerType(data->type_annotation, dtype)) + ICHECK(IsPointerType(data->type_annotation, dtype)) << "Buffer data field expect to have the right pointer type annotation" << " annotation=" << data->type_annotation << ", dtype=" << dtype; @@ -428,7 +428,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) TVM_REGISTER_NODE_TYPE(BufferNode); TVM_REGISTER_GLOBAL("tir.Buffer").set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args.size(), 10); + ICHECK_EQ(args.size(), 10); auto buffer_type = args[9].operator String(); BufferType type = (buffer_type == "auto_broadcast") ? kAutoBroadcast : kDefault; *ret = diff --git a/src/tir/ir/data_layout.cc b/src/tir/ir/data_layout.cc index bc777db55dbe..da3496dba407 100644 --- a/src/tir/ir/data_layout.cc +++ b/src/tir/ir/data_layout.cc @@ -54,7 +54,7 @@ const LayoutAxis LayoutAxis::LOWER_CASE[] = { LayoutAxis('z')}; const LayoutAxis& LayoutAxis::Get(const char name) { - CHECK((name >= 'A' && name <= 'Z') || (name >= 'a' && name <= 'z')) + ICHECK((name >= 'A' && name <= 'Z') || (name >= 'a' && name <= 'z')) << "Invalid layout axis name: " << name << ". Has to be A-Z or a-z."; return (name >= 'A' && name <= 'Z') ? LayoutAxis::UPPER_CASE[name - 'A'] : LayoutAxis::LOWER_CASE[name - 'a']; @@ -62,12 +62,12 @@ const LayoutAxis& LayoutAxis::Get(const char name) { const LayoutAxis& LayoutAxis::Get(const IterVar& itvar) { const std::string axis = itvar->var.get()->name_hint; - CHECK_EQ(axis.size(), 1) << "Invalid layout axis " << axis; + ICHECK_EQ(axis.size(), 1) << "Invalid layout axis " << axis; return LayoutAxis::Get(axis[0]); } const LayoutAxis& LayoutAxis::Get(const std::string& name) { - CHECK_EQ(name.length(), 1) << "Invalid axis " << name; + ICHECK_EQ(name.length(), 1) << "Invalid axis " << name; return LayoutAxis::Get(name[0]); } @@ -77,13 +77,13 @@ Layout::Layout(const Array& axes) { std::ostringstream repr; for (const IterVar& axis : axes) { if (const auto* factor = axis->dom->extent.as()) { - CHECK_GT(factor->value, 0); + ICHECK_GT(factor->value, 0); repr << factor->value; } - CHECK_EQ(axis->var.get()->name_hint.size(), 1) + ICHECK_EQ(axis->var.get()->name_hint.size(), 1) << "Invalid layout axis " << axis->var.get()->name_hint; char c = axis->var.get()->name_hint.operator std::string()[0]; - CHECK((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) << "Invalid layout axis " << c; + ICHECK((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) << "Invalid layout axis " << c; repr << axis->var.get()->name_hint; } node->name = repr.str(); @@ -102,22 +102,22 @@ Layout::Layout(const std::string& name) { // NOLINT(*) int32_t factor = 0; for (char c : name) { if (c >= 'A' && c <= 'Z') { - CHECK_EQ(factor, 0) << "Invalid layout " << name << ": invalid factor size " << factor - << " before dimension " << c; + ICHECK_EQ(factor, 0) << "Invalid layout " << name << ": invalid factor size " << factor + << " before dimension " << c; std::string shape_name("_shape"); shape_name.insert(0, 1, c); IterVar axis = IterVar(Range(PrimExpr(0), Var(shape_name)), Var(std::string(1, c)), tir::kDataPar); node->axes.push_back(axis); } else if (c >= 'a' && c <= 'z') { - CHECK_GT(factor, 0) << "Invalid layout " << name << ": invalid factor size " << factor - << " for 
dimension " << c; + ICHECK_GT(factor, 0) << "Invalid layout " << name << ": invalid factor size " << factor + << " for dimension " << c; IterVar axis = IterVar(Range(PrimExpr(0), PrimExpr(factor)), Var(std::string(1, c)), tir::kDataPar); node->axes.push_back(axis); factor = 0; } else if (c >= '0' && c <= '9') { - CHECK(factor >= 0) << "Invalid layout " << name << ": _ is adjacent to a number."; + ICHECK(factor >= 0) << "Invalid layout " << name << ": _ is adjacent to a number."; factor = factor * 10 + c - '0'; } else { LOG(FATAL) << "Invalid layout " << name; @@ -128,16 +128,16 @@ Layout::Layout(const std::string& name) { // NOLINT(*) std::vector exist_axis(256, false); for (const IterVar& v : node->axes) { auto axis_str = v->var.get()->name_hint.operator std::string(); - CHECK_EQ(axis_str.size(), 1); + ICHECK_EQ(axis_str.size(), 1); char axis = axis_str[0]; - CHECK((axis >= 'a' && axis <= 'z') || (axis >= 'A' && axis <= 'Z')); - CHECK(!exist_axis[axis]) << "Invalid layout " << name << ": duplicate axis " << axis; + ICHECK((axis >= 'a' && axis <= 'z') || (axis >= 'A' && axis <= 'Z')); + ICHECK(!exist_axis[axis]) << "Invalid layout " << name << ": duplicate axis " << axis; exist_axis[axis] = true; } for (const IterVar& v : node->axes) { char axis = v->var.get()->name_hint.operator std::string()[0]; if (axis >= 'a' && axis <= 'z') { - CHECK(exist_axis[axis - 'a' + 'A']) + ICHECK(exist_axis[axis - 'a' + 'A']) << "Invalid layout " << name << ": missing axis " << std::toupper(axis); } } @@ -160,13 +160,13 @@ Layout Layout::Split(const LayoutAxis& axis, size_t target_pos, int32_t factor) if (!defined()) return Layout::Undef(); const std::string& name = operator->()->name; const auto axes = operator->()->axes; - CHECK(target_pos <= this->ndim()) + ICHECK(target_pos <= this->ndim()) << "Invalid split position " << target_pos << " for layout " << name; - CHECK(axis.IsPrimal()) << "Cannot split a subordinate axis " << axis; - CHECK(this->Contains(axis)) << "Axis " << axis << " does not exist in " << name; - CHECK(!this->Contains(axis.ToSubordinate())) + ICHECK(axis.IsPrimal()) << "Cannot split a subordinate axis " << axis; + ICHECK(this->Contains(axis)) << "Axis " << axis << " does not exist in " << name; + ICHECK(!this->Contains(axis.ToSubordinate())) << "Axis " << axis << " has already been split in " << name; - CHECK(factor > 0) << "Invalid split size " << factor; + ICHECK(factor > 0) << "Invalid split size " << factor; Array new_layout; for (size_t i = 0; i <= this->ndim(); ++i) { if (i == target_pos) { @@ -186,7 +186,7 @@ int32_t Layout::FactorOf(const LayoutAxis& axis) const { for (const IterVar& itvar : operator->()->axes) { if (sub == LayoutAxis::Get(itvar)) { const auto* factor = itvar->dom->extent.as(); - CHECK(factor); + ICHECK(factor); return factor->value; } } @@ -261,17 +261,17 @@ inline Array TransformIndex(const Array& src_index, } Array BijectiveLayout::ForwardIndex(const Array& src_index) const { - CHECK(defined()) << "Cannot operate on an undefined bijective layout."; + ICHECK(defined()) << "Cannot operate on an undefined bijective layout."; const BijectiveLayoutNode* self = operator->(); - CHECK_EQ(src_index.size(), self->src_layout->axes.size()) + ICHECK_EQ(src_index.size(), self->src_layout->axes.size()) << "Input mismatch with layout " << self->src_layout; return TransformIndex(src_index, self->src_layout->axes, self->forward_rule); } Array BijectiveLayout::BackwardIndex(const Array& dst_index) const { - CHECK(defined()) << "Cannot operate on an undefined bijective layout."; + 
ICHECK(defined()) << "Cannot operate on an undefined bijective layout."; const BijectiveLayoutNode* self = operator->(); - CHECK_EQ(dst_index.size(), self->dst_layout->axes.size()) + ICHECK_EQ(dst_index.size(), self->dst_layout->axes.size()) << "Output mismatch with layout " << self->dst_layout; return TransformIndex(dst_index, self->dst_layout->axes, self->backward_rule); } @@ -281,7 +281,7 @@ inline Array TransformShape(const Array& src_shape, const Array& target_axis, const Array& transform_rule) { arith::Analyzer ana; - CHECK_EQ(src_shape.size(), src_axis.size()); + ICHECK_EQ(src_shape.size(), src_axis.size()); // bind variables for original axes // for major-axis, bind the corresponding size // for minor-axis, simply bind it as 0, so that we can reuse forward/backward_rule, @@ -299,7 +299,7 @@ inline Array TransformShape(const Array& src_shape, const auto* orig_shape_const = orig_shape.as(); const auto* orig_axis_extent = orig_axis->dom->extent.as(); if (orig_shape_const) { - CHECK_EQ(orig_shape_const->value, orig_axis_extent->value) + ICHECK_EQ(orig_shape_const->value, orig_axis_extent->value) << "Input shape mismatch at index " << i << ". Expected " << orig_axis->dom->extent << ", get " << orig_shape; } @@ -313,7 +313,7 @@ inline Array TransformShape(const Array& src_shape, // for major-axis, use the forward/backward_rule directly, // for minor-axis, simply use the extent. Array result; - CHECK_EQ(transform_rule.size(), target_axis.size()); + ICHECK_EQ(transform_rule.size(), target_axis.size()); for (size_t i = 0; i < transform_rule.size(); ++i) { PrimExpr rule = transform_rule[i]; IterVar axis = target_axis[i]; @@ -331,13 +331,13 @@ inline Array TransformShape(const Array& src_shape, } Array BijectiveLayout::ForwardShape(const Array& shape) const { - CHECK(defined()) << "Cannot operate on an undefined bijective layout."; + ICHECK(defined()) << "Cannot operate on an undefined bijective layout."; const BijectiveLayoutNode* self = operator->(); return TransformShape(shape, self->src_layout->axes, self->dst_layout->axes, self->forward_rule); } Array BijectiveLayout::BackwardShape(const Array& shape) const { - CHECK(defined()) << "Cannot operate on an undefined bijective layout."; + ICHECK(defined()) << "Cannot operate on an undefined bijective layout."; const BijectiveLayoutNode* self = operator->(); return TransformShape(shape, self->dst_layout->axes, self->src_layout->axes, self->backward_rule); } @@ -351,7 +351,7 @@ BijectiveLayout::BijectiveLayout(Layout src_layout, Layout dst_layout) { // To be consistent with previous behavior, a nullptr layout is created // when argument is invalid. 
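// Before the constructor logic below: ForwardIndex/BackwardIndex (hunks above)
// ICHECK the index rank against the layout rank, then substitute the indices into
// the stored rules. A concrete standalone sketch of such a rule pair for
// NCHW <-> NCHW16c (the factor-16 channel split is an illustrative choice, not
// something fixed by this diff):
#include <cassert>
#include <vector>

std::vector<long> ForwardIndexNCHW16c(const std::vector<long>& nchw) {
  assert(nchw.size() == 4);  // ICHECK_EQ(src_index.size(), src_layout->axes.size())
  return {nchw[0], nchw[1] / 16, nchw[2], nchw[3], nchw[1] % 16};
}

std::vector<long> BackwardIndexNCHW16c(const std::vector<long>& nchw16c) {
  assert(nchw16c.size() == 5);  // ICHECK_EQ(dst_index.size(), dst_layout->axes.size())
  return {nchw16c[0], nchw16c[1] * 16 + nchw16c[4], nchw16c[2], nchw16c[3]};
}

int main() {
  std::vector<long> idx = {0, 37, 5, 9};
  assert(BackwardIndexNCHW16c(ForwardIndexNCHW16c(idx)) == idx);  // bijective by construction
}
// In the constructor below, GetStoreRule must accordingly succeed in both directions: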
if (GetStoreRule(&n->forward_rule, n->src_layout, n->dst_layout)) { - CHECK(GetStoreRule(&n->backward_rule, n->dst_layout, n->src_layout)); + ICHECK(GetStoreRule(&n->backward_rule, n->dst_layout, n->src_layout)); data_ = std::move(n); } } diff --git a/src/tir/ir/expr.cc b/src/tir/ir/expr.cc index f648aca18e46..825bac86919c 100644 --- a/src/tir/ir/expr.cc +++ b/src/tir/ir/expr.cc @@ -33,30 +33,30 @@ namespace tvm { namespace tir { -#define TVM_DEFINE_BINOP_CONSTRUCTOR(Name) \ - Name::Name(PrimExpr a, PrimExpr b) { \ - using T = Name::ContainerType; \ - CHECK(a.defined()) << "ValueError: a is undefined\n"; \ - CHECK(b.defined()) << "ValueError: b is undefined\n"; \ - CHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types\n"; \ - ObjectPtr node = make_object(); \ - node->dtype = a.dtype(); \ - node->a = std::move(a); \ - node->b = std::move(b); \ - data_ = std::move(node); \ +#define TVM_DEFINE_BINOP_CONSTRUCTOR(Name) \ + Name::Name(PrimExpr a, PrimExpr b) { \ + using T = Name::ContainerType; \ + ICHECK(a.defined()) << "ValueError: a is undefined\n"; \ + ICHECK(b.defined()) << "ValueError: b is undefined\n"; \ + ICHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types\n"; \ + ObjectPtr node = make_object(); \ + node->dtype = a.dtype(); \ + node->a = std::move(a); \ + node->b = std::move(b); \ + data_ = std::move(node); \ } -#define TVM_DEFINE_CMPOP_CONSTRUCTOR(Name) \ - Name::Name(PrimExpr a, PrimExpr b) { \ - using T = Name::ContainerType; \ - CHECK(a.defined()) << "ValueError: a is undefined\n"; \ - CHECK(b.defined()) << "ValueError: b is undefined\n"; \ - CHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types\n"; \ - ObjectPtr node = make_object(); \ - node->dtype = DataType::Bool(a.dtype().lanes()); \ - node->a = std::move(a); \ - node->b = std::move(b); \ - data_ = std::move(node); \ +#define TVM_DEFINE_CMPOP_CONSTRUCTOR(Name) \ + Name::Name(PrimExpr a, PrimExpr b) { \ + using T = Name::ContainerType; \ + ICHECK(a.defined()) << "ValueError: a is undefined\n"; \ + ICHECK(b.defined()) << "ValueError: b is undefined\n"; \ + ICHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types\n"; \ + ObjectPtr node = make_object(); \ + node->dtype = DataType::Bool(a.dtype().lanes()); \ + node->a = std::move(a); \ + node->b = std::move(b); \ + data_ = std::move(node); \ } // Var @@ -178,8 +178,8 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Cast Cast::Cast(DataType t, PrimExpr value) { - CHECK(value.defined()); - CHECK_EQ(t.lanes(), value.dtype().lanes()); + ICHECK(value.defined()); + ICHECK_EQ(t.lanes(), value.dtype().lanes()); ObjectPtr node = make_object(); node->dtype = t; node->value = std::move(value); @@ -453,11 +453,11 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // And And::And(PrimExpr a, PrimExpr b) { - CHECK(a.defined()) << "ValueError: a is undefined"; - CHECK(b.defined()) << "ValueError: b is undefined"; - CHECK(a.dtype().is_bool()); - CHECK(b.dtype().is_bool()); - CHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types"; + ICHECK(a.defined()) << "ValueError: a is undefined"; + ICHECK(b.defined()) << "ValueError: b is undefined"; + ICHECK(a.dtype().is_bool()); + ICHECK(b.dtype().is_bool()); + ICHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types"; ObjectPtr node = make_object(); node->dtype = DataType::Bool(a.dtype().lanes()); @@ -482,11 +482,11 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Or Or::Or(PrimExpr a, PrimExpr b) { - CHECK(a.defined()) << "ValueError: a is undefined"; - CHECK(b.defined()) << "ValueError: b is undefined"; - 
CHECK(a.dtype().is_bool()); - CHECK(b.dtype().is_bool()); - CHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types"; + ICHECK(a.defined()) << "ValueError: a is undefined"; + ICHECK(b.defined()) << "ValueError: b is undefined"; + ICHECK(a.dtype().is_bool()); + ICHECK(b.dtype().is_bool()); + ICHECK(a.dtype() == b.dtype()) << "TypeError: mismatched types"; ObjectPtr node = make_object(); node->dtype = DataType::Bool(a.dtype().lanes()); @@ -511,8 +511,8 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Not Not::Not(PrimExpr a) { - CHECK(a.defined()) << "ValueError: a is undefined"; - CHECK(a.dtype().is_bool()); + ICHECK(a.defined()) << "ValueError: a is undefined"; + ICHECK(a.dtype().is_bool()); ObjectPtr node = make_object(); node->dtype = DataType::Bool(a.dtype().lanes()); @@ -533,12 +533,12 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Select Select::Select(PrimExpr condition, PrimExpr true_value, PrimExpr false_value) { - CHECK(condition.defined()) << "ValueError: condition is undefined"; - CHECK(true_value.defined()) << "ValueError: true_value is undefined"; - CHECK(false_value.defined()) << "ValueError: true_value is undefined"; - CHECK(condition.dtype().is_bool()); - CHECK(condition.dtype().lanes() == true_value.dtype().lanes() || condition.dtype().lanes() == 1); - CHECK(false_value.dtype() == true_value.dtype()) << "TypeError: mismatched types"; + ICHECK(condition.defined()) << "ValueError: condition is undefined"; + ICHECK(true_value.defined()) << "ValueError: true_value is undefined"; + ICHECK(false_value.defined()) << "ValueError: true_value is undefined"; + ICHECK(condition.dtype().is_bool()); + ICHECK(condition.dtype().lanes() == true_value.dtype().lanes() || condition.dtype().lanes() == 1); + ICHECK(false_value.dtype() == true_value.dtype()) << "TypeError: mismatched types"; ObjectPtr node = make_object(); node->dtype = true_value.dtype(); @@ -569,11 +569,11 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Load Load::Load(DataType dtype, Var buffer_var, PrimExpr index, PrimExpr predicate) { - CHECK(buffer_var.defined()); - CHECK(predicate.defined()); - CHECK(index.defined()); - CHECK_EQ(dtype.lanes(), index.dtype().lanes()); - CHECK_EQ(dtype.lanes(), predicate.dtype().lanes()); + ICHECK(buffer_var.defined()); + ICHECK(predicate.defined()); + ICHECK(index.defined()); + ICHECK_EQ(dtype.lanes(), index.dtype().lanes()); + ICHECK_EQ(dtype.lanes(), predicate.dtype().lanes()); ObjectPtr node = make_object(); node->dtype = dtype; @@ -609,12 +609,12 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Ramp Ramp::Ramp(PrimExpr base, PrimExpr stride, int lanes) { - CHECK(base.defined()); - CHECK(stride.defined()); - CHECK(base.dtype().is_scalar()); - CHECK(stride.dtype().is_scalar()); - CHECK_GT(lanes, 1); - CHECK_EQ(stride.dtype(), base.dtype()); + ICHECK(base.defined()); + ICHECK(stride.defined()); + ICHECK(base.dtype().is_scalar()); + ICHECK(stride.dtype().is_scalar()); + ICHECK_GT(lanes, 1); + ICHECK_EQ(stride.dtype(), base.dtype()); ObjectPtr node = make_object(); node->dtype = base.dtype().with_lanes(lanes); @@ -642,9 +642,9 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Broadcast Broadcast::Broadcast(PrimExpr value, int lanes) { - CHECK(value.defined()); - CHECK(value.dtype().is_scalar()); - CHECK_GT(lanes, 1); + ICHECK(value.defined()); + ICHECK(value.dtype().is_scalar()); + ICHECK_GT(lanes, 1); ObjectPtr node = make_object(); node->dtype = value.dtype().with_lanes(lanes); @@ -669,9 +669,9 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Let Let::Let(Var var, PrimExpr value, 
PrimExpr body) { - CHECK(value.defined()); - CHECK(body.defined()); - CHECK_EQ(value.dtype(), var.dtype()); + ICHECK(value.defined()); + ICHECK(body.defined()); + ICHECK_EQ(value.dtype(), var.dtype()); ObjectPtr node = make_object(); node->dtype = body.dtype(); @@ -700,7 +700,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Call Call::Call(DataType dtype, RelayExpr op, Array args) { for (size_t i = 0; i < args.size(); ++i) { - CHECK(args[i].defined()); + ICHECK(args[i].defined()); } ObjectPtr node = make_object(); @@ -714,7 +714,7 @@ TVM_REGISTER_GLOBAL("tir.Call") .set_body_typed([](DataType type, RelayExpr op, Array args) { Array prim_expr_args; for (const auto& it : args) { - CHECK(it->IsInstance() || it->IsInstance()); + ICHECK(it->IsInstance() || it->IsInstance()); if (const auto* str = it.as()) { prim_expr_args.push_back(StringImm(str->data)); } else { @@ -733,7 +733,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) p->stream << ptr_op->name << "("; } else { auto* ptr_gvar = op->op.as(); - CHECK(ptr_gvar != nullptr); + ICHECK(ptr_gvar != nullptr); p->stream << "@" << ptr_gvar->name_hint << "("; } for (size_t i = 0; i < op->args.size(); ++i) { @@ -747,17 +747,17 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Shuffle Shuffle::Shuffle(Array vectors, Array indices) { - CHECK_NE(vectors.size(), 0U); - CHECK_NE(indices.size(), 0U); + ICHECK_NE(vectors.size(), 0U); + ICHECK_NE(indices.size(), 0U); DataType base_type = vectors[0].dtype().element_of(); int total_lanes = 0; for (PrimExpr val : vectors) { - CHECK(val.dtype().element_of() == base_type); + ICHECK(val.dtype().element_of() == base_type); total_lanes += val.dtype().lanes(); } - CHECK_LE(indices.size(), static_cast(total_lanes)); + ICHECK_LE(indices.size(), static_cast(total_lanes)); ObjectPtr node = make_object(); node->dtype = base_type.with_lanes(static_cast(indices.size())); @@ -767,7 +767,7 @@ Shuffle::Shuffle(Array vectors, Array indices) { } PrimExpr Shuffle::Concat(Array vectors) { - CHECK_NE(vectors.size(), 0); + ICHECK_NE(vectors.size(), 0); if (vectors.size() == 1) { return vectors[0]; } @@ -824,9 +824,9 @@ CommReducer::CommReducer(Array lhs, Array rhs, Array result, } Array CommReducerNode::operator()(Array a, Array b) const { - CHECK_EQ(a.size(), b.size()); - CHECK_EQ(lhs.size(), a.size()); - CHECK_EQ(rhs.size(), b.size()); + ICHECK_EQ(a.size(), b.size()); + ICHECK_EQ(lhs.size(), a.size()); + ICHECK_EQ(rhs.size(), b.size()); Map value_map; for (size_t i = 0; i < a.size(); ++i) { value_map.Set(lhs[i], a[i]); @@ -859,21 +859,21 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) Reduce::Reduce(CommReducer combiner, Array source, Array axis, PrimExpr condition, int value_index, Array init) { for (size_t i = 0; i < axis.size(); ++i) { - CHECK_EQ(axis[i]->iter_type, kCommReduce) << "Can only take axis created by reduce_axis"; + ICHECK_EQ(axis[i]->iter_type, kCommReduce) << "Can only take axis created by reduce_axis"; } if (!condition.defined()) { condition = const_true(); } auto n = make_object(); - CHECK(source.defined()); + ICHECK(source.defined()); for (size_t i = 0; i < axis.size(); ++i) { - CHECK(axis[i].defined()); + ICHECK(axis[i].defined()); } if (!init.empty()) { - CHECK_EQ(init.size(), source.size()) << "Number of inits should match number of exprs"; + ICHECK_EQ(init.size(), source.size()) << "Number of inits should match number of exprs"; for (size_t i = 0; i < init.size(); i++) { - CHECK(init[i]->IsInstance() || init[i]->IsInstance() || - init[i]->IsInstance()) + ICHECK(init[i]->IsInstance() || init[i]->IsInstance() 
|| + init[i]->IsInstance()) << "init can only be a IntImm, FloatImm or ProducerLoad"; } } diff --git a/src/tir/ir/stmt.cc b/src/tir/ir/stmt.cc index f45117791457..dbbc99c3abed 100644 --- a/src/tir/ir/stmt.cc +++ b/src/tir/ir/stmt.cc @@ -30,9 +30,9 @@ namespace tir { // LetStmt LetStmt::LetStmt(Var var, PrimExpr value, Stmt body) { - CHECK(value.defined()); - CHECK(body.defined()); - CHECK_EQ(value.dtype(), var.dtype()); + ICHECK(value.defined()); + ICHECK(body.defined()); + ICHECK_EQ(value.dtype(), var.dtype()); ObjectPtr node = make_object(); node->var = std::move(var); @@ -88,8 +88,8 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // AssertStmt AssertStmt::AssertStmt(PrimExpr condition, PrimExpr message, Stmt body) { - CHECK(condition.defined()); - CHECK(message.dtype() == DataType::Int(32) || message.as()) + ICHECK(condition.defined()); + ICHECK(message.dtype() == DataType::Int(32) || message.as()) << "TypeError: AssertStmt message must be an int or string:" << message << "\n"; ObjectPtr node = make_object(); @@ -126,12 +126,12 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // For For::For(Var loop_var, PrimExpr min, PrimExpr extent, ForType for_type, DeviceAPI device_api, Stmt body) { - CHECK(min.defined()); - CHECK(extent.defined()); - CHECK(min.dtype().is_scalar()); - CHECK(extent.dtype().is_scalar()); - CHECK(loop_var.dtype().is_scalar()); - CHECK(body.defined()); + ICHECK(min.defined()); + ICHECK(extent.defined()); + ICHECK(min.dtype().is_scalar()); + ICHECK(extent.dtype().is_scalar()); + ICHECK(loop_var.dtype().is_scalar()); + ICHECK(body.defined()); ObjectPtr node = make_object(); node->loop_var = std::move(loop_var); @@ -189,11 +189,11 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Store Store::Store(Var buffer_var, PrimExpr value, PrimExpr index, PrimExpr predicate) { - CHECK(value.defined()); - CHECK(index.defined()); - CHECK(predicate.defined()); - CHECK_EQ(value.dtype().lanes(), index.dtype().lanes()); - CHECK_EQ(value.dtype().lanes(), predicate.dtype().lanes()); + ICHECK(value.defined()); + ICHECK(index.defined()); + ICHECK(predicate.defined()); + ICHECK_EQ(value.dtype().lanes(), index.dtype().lanes()); + ICHECK_EQ(value.dtype().lanes(), predicate.dtype().lanes()); ObjectPtr node = make_object(); node->buffer_var = std::move(buffer_var); @@ -267,12 +267,12 @@ Allocate::Allocate(Var buffer_var, DataType dtype, Array extents, Prim // IsPointerPType(buffer_var->type_annotation, dtype) // once we fix the allocate tvm script printing. 
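// The extents loop below enforces the same shape discipline the ProducerRealize
// hunk applies to bounds: every entry must be a defined scalar, and the guard
// condition must be boolean. A standalone sketch with a toy expression record
// (field names are illustrative, not TVM's PrimExpr):
#include <cassert>
#include <vector>

struct Expr {
  bool defined;
  int lanes;     // 1 == scalar
  bool is_bool;
};

bool ValidAllocate(const std::vector<Expr>& extents, const Expr& condition) {
  for (const Expr& e : extents) {
    if (!e.defined || e.lanes != 1) return false;  // ICHECK(defined()), ICHECK(is_scalar())
  }
  return condition.defined && condition.is_bool;   // ICHECK(condition.dtype().is_bool())
}

int main() {
  assert(ValidAllocate({{true, 1, false}, {true, 1, false}}, {true, 1, true}));
  assert(!ValidAllocate({{true, 4, false}}, {true, 1, true}));  // vector extent rejected
}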
for (size_t i = 0; i < extents.size(); ++i) { - CHECK(extents[i].defined()); - CHECK(extents[i].dtype().is_scalar()); + ICHECK(extents[i].defined()); + ICHECK(extents[i].dtype().is_scalar()); } - CHECK(body.defined()); - CHECK(condition.defined()); - CHECK(condition.dtype().is_bool()); + ICHECK(body.defined()); + ICHECK(condition.defined()); + ICHECK(condition.dtype().is_bool()); ObjectPtr node = make_object(); node->buffer_var = std::move(buffer_var); @@ -326,14 +326,14 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) ProducerRealize::ProducerRealize(DataProducer producer, Region bounds, PrimExpr condition, Stmt body) { for (size_t i = 0; i < bounds.size(); ++i) { - CHECK(bounds[i]->min.defined()); - CHECK(bounds[i]->extent.defined()); - CHECK(bounds[i]->min.dtype().is_scalar()); - CHECK(bounds[i]->extent.dtype().is_scalar()); + ICHECK(bounds[i]->min.defined()); + ICHECK(bounds[i]->extent.defined()); + ICHECK(bounds[i]->min.dtype().is_scalar()); + ICHECK(bounds[i]->extent.dtype().is_scalar()); } - CHECK(body.defined()); - CHECK(condition.defined()); - CHECK(condition.dtype().is_bool()); + ICHECK(body.defined()); + ICHECK(condition.defined()); + ICHECK(condition.dtype().is_bool()); ObjectPtr node = make_object(); node->producer = std::move(producer); @@ -428,8 +428,8 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // IfThenElse IfThenElse::IfThenElse(PrimExpr condition, Stmt then_case, Stmt else_case) { - CHECK(condition.defined()); - CHECK(then_case.defined()); + ICHECK(condition.defined()); + ICHECK(then_case.defined()); // else_case may be null. ObjectPtr node = make_object(); node->condition = std::move(condition); @@ -478,7 +478,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) // Evaluate Evaluate::Evaluate(PrimExpr value) { - CHECK(value.defined()); + ICHECK(value.defined()); ObjectPtr node = make_object(); node->value = std::move(value); diff --git a/src/tir/ir/transform.cc b/src/tir/ir/transform.cc index 62c790fab3ab..95c40f9a3c8e 100644 --- a/src/tir/ir/transform.cc +++ b/src/tir/ir/transform.cc @@ -88,7 +88,7 @@ PrimFuncPass::PrimFuncPass( // Perform Module -> Module optimizations at the PrimFunc level. 
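// PrimFuncPassNode::operator() below guards the module handle and then rewrites
// each function; the VerifySSA pass earlier in this diff walks mod->functions the
// same way. A standalone sketch of that module-level loop with toy types (the
// aliases here are illustrative, not TVM's containers):
#include <cassert>
#include <map>
#include <string>

using PrimFunc = std::string;
using IRModule = std::map<std::string, PrimFunc>;

IRModule ApplyToEachFunc(IRModule mod, PrimFunc (*pass)(const PrimFunc&)) {
  // The real pass ICHECKs mod.defined() first; a std::map is always "defined".
  for (auto& kv : mod) {
    kv.second = pass(kv.second);  // rewrite each PrimFunc in place
  }
  return mod;
}

PrimFunc MarkLowered(const PrimFunc& f) { return f + ":lowered"; }

int main() {
  IRModule mod{{"main", "func"}};
  assert(ApplyToEachFunc(mod, MarkLowered)["main"] == "func:lowered");
}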
IRModule PrimFuncPassNode::operator()(IRModule mod, const PassContext& pass_ctx) const { const PassInfo& pass_info = Info(); - CHECK(mod.defined()); + ICHECK(mod.defined()); pass_ctx.Trace(mod, pass_info, true); std::vector deleted_list; IRModuleNode* mod_ptr = mod.CopyOnWrite(); diff --git a/src/tir/op/op.cc b/src/tir/op/op.cc index 6d94a08cad5d..71321d2a3b02 100644 --- a/src/tir/op/op.cc +++ b/src/tir/op/op.cc @@ -107,7 +107,7 @@ void BinaryOpMatchTypes(PrimExpr& lhs, PrimExpr& rhs) { // NOLINT(*) } else if (rtype.lanes() == 1 && ltype.lanes() != 1) { rhs = tir::Broadcast(rhs, ltype.lanes()); } else { - CHECK(ltype.lanes() == rtype.lanes()) << "Cannot match type " << ltype << " vs " << rtype; + ICHECK(ltype.lanes() == rtype.lanes()) << "Cannot match type " << ltype << " vs " << rtype; } if (lhs.dtype() == rhs.dtype()) return; // Only do very simple type conversion @@ -146,7 +146,7 @@ void BinaryOpMatchTypes(PrimExpr& lhs, PrimExpr& rhs) { // NOLINT(*) // maximum and min limits PrimExpr max_value(const DataType& dtype) { using namespace tir; - CHECK_EQ(dtype.lanes(), 1); + ICHECK_EQ(dtype.lanes(), 1); if (dtype.is_int()) { if (dtype.bits() == 64) { return IntImm(dtype, std::numeric_limits::max()); @@ -178,10 +178,10 @@ PrimExpr max_value(const DataType& dtype) { PrimExpr min_value(const DataType& dtype) { using namespace tir; - CHECK_EQ(dtype.lanes(), 1); + ICHECK_EQ(dtype.lanes(), 1); if (datatype::Registry::Global()->GetTypeRegistered(dtype.code())) { auto f = datatype::GetMinFunc(dtype.code()); - CHECK(f) << "No minimum function registered for custom dtype " << (unsigned int)dtype.code(); + ICHECK(f) << "No minimum function registered for custom dtype " << (unsigned int)dtype.code(); // TODO(@hypercubestart) Document this change (and others associated with the overflowing // floatimm min bug) return (*f)(dtype.bits()); @@ -211,7 +211,7 @@ PrimExpr min_value(const DataType& dtype) { // infinity PrimExpr infinity(const DataType& dtype) { using namespace tir; - CHECK_EQ(dtype.lanes(), 1); + ICHECK_EQ(dtype.lanes(), 1); if (dtype.is_float()) { if (dtype.bits() == 64) { return FloatImm(dtype, std::numeric_limits::infinity()); @@ -273,7 +273,7 @@ PrimExpr cast(const DataType& t, PrimExpr value) { } return tir::Broadcast(value, t.lanes()); } else { - CHECK(value.dtype().lanes() == t.lanes()); + ICHECK(value.dtype().lanes() == t.lanes()); return tir::Cast(t, value); } } @@ -326,8 +326,8 @@ PrimExpr div(PrimExpr a, PrimExpr b) { } PrimExpr truncdiv(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()) << a; - CHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; + ICHECK(a.dtype().is_int() || a.dtype().is_uint()) << a; + ICHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; return div(a, b); } @@ -348,8 +348,8 @@ PrimExpr indexdiv(PrimExpr a, PrimExpr b) { return floordiv(a, b); } PrimExpr indexmod(PrimExpr a, PrimExpr b) { return floormod(a, b); } PrimExpr floordiv(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()) << a; - CHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; + ICHECK(a.dtype().is_int() || a.dtype().is_uint()) << a; + ICHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; BinaryOpMatchTypes(a, b); PrimExpr ret = arith::TryConstFold(a, b); if (ret.defined()) return ret; @@ -357,8 +357,8 @@ PrimExpr floordiv(PrimExpr a, PrimExpr b) { } PrimExpr floormod(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() ||
a.dtype().is_uint()) << a; - CHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; + ICHECK(a.dtype().is_int() || a.dtype().is_uint()) << a; + ICHECK(b.dtype().is_int() || b.dtype().is_uint()) << b; BinaryOpMatchTypes(a, b); PrimExpr ret = arith::TryConstFold(a, b); if (ret.defined()) return ret; @@ -395,7 +395,7 @@ PrimExpr max(PrimExpr a, PrimExpr b) { // if_then_else PrimExpr if_then_else(PrimExpr cond, PrimExpr true_value, PrimExpr false_value) { - CHECK(cond.dtype() == DataType::Bool(1)) + ICHECK(cond.dtype() == DataType::Bool(1)) << "if_then_else only accepts a boolean condition."; BinaryOpMatchTypes(true_value, false_value); if (const IntImmNode* op = cond.as()) { @@ -460,23 +460,23 @@ PrimExpr operator!=(PrimExpr a, PrimExpr b) { } PrimExpr operator&&(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_bool()); - CHECK(b.dtype().is_bool()); + ICHECK(a.dtype().is_bool()); + ICHECK(b.dtype().is_bool()); PrimExpr ret = arith::TryConstFold(a, b); if (ret.defined()) return ret; return tir::And(a, b); } PrimExpr operator||(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_bool()); - CHECK(b.dtype().is_bool()); + ICHECK(a.dtype().is_bool()); + ICHECK(b.dtype().is_bool()); PrimExpr ret = arith::TryConstFold(a, b); if (ret.defined()) return ret; return tir::Or(a, b); } PrimExpr operator!(PrimExpr a) { - CHECK(a.dtype().is_bool()); + ICHECK(a.dtype().is_bool()); PrimExpr ret = arith::TryConstFold(a); if (ret.defined()) return ret; return tir::Not(a); @@ -484,13 +484,13 @@ PrimExpr operator!(PrimExpr a) { // shift right PrimExpr operator>>(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); - CHECK(b.dtype().is_int() || b.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(b.dtype().is_int() || b.dtype().is_uint()); BinaryOpMatchTypes(a, b); TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); if (pb) - CHECK(pb->value >= 0 && pb->value < rtype.bits()) + ICHECK(pb->value >= 0 && pb->value < rtype.bits()) << "Shift amount must be non-negative and less than " << rtype.bits() << " for type " << rtype; if (pa && pb) return IntImm(rtype, (pa->value >> pb->value)); @@ -504,13 +504,13 @@ PrimExpr operator>>(PrimExpr a, PrimExpr b) { // shift left PrimExpr operator<<(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); - CHECK(b.dtype().is_int() || b.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(b.dtype().is_int() || b.dtype().is_uint()); BinaryOpMatchTypes(a, b); TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); if (pb) - CHECK(pb->value >= 0 && pb->value < rtype.bits()) + ICHECK(pb->value >= 0 && pb->value < rtype.bits()) << "Shift amount must be non-negative and less than " << rtype.bits() << " for type " << rtype; if (pa && pb) return IntImm(rtype, (pa->value << pb->value)); @@ -523,8 +523,8 @@ PrimExpr operator<<(PrimExpr a, PrimExpr b) { // bitwise and PrimExpr operator&(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); - CHECK(b.dtype().is_int() || b.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(b.dtype().is_int() || b.dtype().is_uint()); BinaryOpMatchTypes(a, b); TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); @@ -535,8 +535,8 @@ PrimExpr operator&(PrimExpr a, PrimExpr b) { // bitwise_or PrimExpr operator|(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); - CHECK(b.dtype().is_int() || b.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(b.dtype().is_int() || b.dtype().is_uint()); BinaryOpMatchTypes(a, b);
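// Aside on the shift hunks just above: the ICHECK bounds a constant shift amount
// by the operand width before folding, since shifting by a negative amount or by
// >= bit-width is undefined behavior in the C++ that performs the fold. A
// standalone sketch of the guarded fold:
#include <cassert>
#include <cstdint>
#include <stdexcept>

int64_t FoldShr(int64_t a, int64_t b, int bits) {
  if (b < 0 || b >= bits) {  // mirrors ICHECK(pb->value >= 0 && pb->value < rtype.bits())
    throw std::runtime_error("Shift amount must be non-negative and less than bit width");
  }
  return a >> b;
}

int main() {
  assert(FoldShr(256, 4, 32) == 16);
  try {
    FoldShr(1, 40, 32);
    return 1;  // should not be reached
  } catch (const std::runtime_error&) {
  }
  return 0;
}
// The bitwise-or fold below reuses the same const-propagation scaffolding: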
TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); @@ -547,8 +547,8 @@ PrimExpr operator|(PrimExpr a, PrimExpr b) { // bitwise_xor PrimExpr operator^(PrimExpr a, PrimExpr b) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); - CHECK(b.dtype().is_int() || b.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(b.dtype().is_int() || b.dtype().is_uint()); BinaryOpMatchTypes(a, b); TVM_INDEX_CONST_PROPAGATION({ const DataType& rtype = a.dtype(); @@ -559,7 +559,7 @@ PrimExpr operator^(PrimExpr a, PrimExpr b) { // bitwise_not PrimExpr operator~(PrimExpr a) { - CHECK(a.dtype().is_int() || a.dtype().is_uint()); + ICHECK(a.dtype().is_int() || a.dtype().is_uint()); return tir::Call(a.dtype(), tir::builtin::bitwise_not(), {a}); } @@ -568,7 +568,7 @@ TVM_REGISTER_GLOBAL("tir.bitwise_not").set_body_typed([](PrimExpr a) { return ~a // pow PrimExpr pow(PrimExpr x, PrimExpr y) { BinaryOpMatchTypes(x, y); - CHECK(x.dtype().is_float()) << "power only applies to float"; + ICHECK(x.dtype().is_float()) << "power only applies to float"; static auto op = Op::Get("tir.pow"); return tir::Call(x.dtype(), op, {x, y}); } @@ -652,7 +652,7 @@ PrimExpr sum(PrimExpr source, Array rdom, Array init) { } PrimExpr all(PrimExpr source, Array rdom, Array init) { - CHECK(source.dtype().is_bool()); + ICHECK(source.dtype().is_bool()); Var x("x", source.dtype()), y("y", source.dtype()); PrimExpr result = tir::And(x, y); PrimExpr identity_element = make_const(source.dtype(), true); @@ -661,7 +661,7 @@ PrimExpr all(PrimExpr source, Array rdom, Array init) { } PrimExpr any(PrimExpr source, Array rdom, Array init) { - CHECK(source.dtype().is_bool()); + ICHECK(source.dtype().is_bool()); Var x("x", source.dtype()), y("y", source.dtype()); PrimExpr result = tir::Or(x, y); PrimExpr identity_element = make_const(source.dtype(), false); @@ -696,7 +696,7 @@ PrimExpr prod(PrimExpr source, Array rdom, Array init) { // fmod PrimExpr fmod(PrimExpr x, PrimExpr y) { BinaryOpMatchTypes(x, y); - CHECK(x.dtype().is_float()) << "fmod only applies to float"; + ICHECK(x.dtype().is_float()) << "fmod only applies to float"; static auto op = Op::Get("tir.fmod"); return tir::Call(x.dtype(), op, {x, y}); } diff --git a/src/tir/transforms/arg_binder.cc b/src/tir/transforms/arg_binder.cc index 1faa6267b4fe..1b58bfa38b40 100644 --- a/src/tir/transforms/arg_binder.cc +++ b/src/tir/transforms/arg_binder.cc @@ -49,7 +49,7 @@ void BinderAddAssert(arith::Analyzer* ana, PrimExpr cond, const std::string& arg bool ArgBinder::Bind_(const PrimExpr& arg, const PrimExpr& value, const std::string& arg_name, bool with_lets) { - CHECK_EQ(arg.dtype(), value.dtype()); + ICHECK_EQ(arg.dtype(), value.dtype()); if (const VarNode* v = arg.as()) { auto it = def_map_->find(v); if (it == def_map_->end()) { @@ -78,7 +78,7 @@ void ArgBinder::Bind(const PrimExpr& arg, const PrimExpr& value, const std::stri void ArgBinder::BindArray(const Array& arg, const Array& value, const std::string& arg_name) { - CHECK_EQ(arg.size(), value.size()) << "Argument " << arg_name << " array size mismatch"; + ICHECK_EQ(arg.size(), value.size()) << "Argument " << arg_name << " array size mismatch"; for (size_t i = 0; i < arg.size(); ++i) { std::ostringstream os; os << arg_name << "[" << i << "]"; @@ -88,8 +88,8 @@ void ArgBinder::BindArray(const Array& arg, const Array& val void ArgBinder::BindBuffer(const Buffer& arg, const Buffer& value, const std::string& arg_name, bool fuzzy_match) { - CHECK_EQ(arg->scope, value->scope) << "Argument " << arg_name << " Buffer
bind scope mismatch"; - CHECK_EQ(arg->dtype, value->dtype) + ICHECK_EQ(arg->scope, value->scope) << "Argument " << arg_name << " Buffer bind scope mismatch"; + ICHECK_EQ(arg->dtype, value->dtype) << "Argument " << arg_name << " Buffer bind data type mismatch"; if (value->data_alignment % arg->data_alignment != 0) { LOG(WARNING) << "Trying to bind buffer to another one with lower alignment requirement " @@ -98,7 +98,7 @@ void ArgBinder::BindBuffer(const Buffer& arg, const Buffer& value, const std::st } // bind pointer and offset. if (is_zero(arg->elem_offset)) { - CHECK(is_zero(value->elem_offset)) + ICHECK(is_zero(value->elem_offset)) << "Trying to bind a Buffer with offset into one without offset " << " required elem_offset=" << arg->elem_offset << ", provided elem_offset=" << value->elem_offset; @@ -116,10 +116,10 @@ void ArgBinder::BindBuffer(const Buffer& arg, const Buffer& value, const std::st } if (arg->shape.size() < value->shape.size()) { - CHECK(fuzzy_match) << "Argument " << arg_name << " size mismatch"; + ICHECK(fuzzy_match) << "Argument " << arg_name << " size mismatch"; size_t diff = value->shape.size() - arg->shape.size(); for (size_t i = 0; i < diff; ++i) { - CHECK(is_one(analyzer_.Simplify(value->shape[i]))) + ICHECK(is_one(analyzer_.Simplify(value->shape[i]))) << "Argument " << arg_name << " shape mismatch" << arg->shape << " vs " << value->shape; } for (size_t i = 0; i < arg->shape.size(); ++i) { @@ -128,8 +128,8 @@ void ArgBinder::BindBuffer(const Buffer& arg, const Buffer& value, const std::st this->Bind(arg->shape[i], value->shape[i + diff], os.str()); } if (value->strides.size() != 0) { - CHECK_EQ(arg->strides.size(), arg->shape.size()); - CHECK_EQ(value->strides.size(), value->shape.size()); + ICHECK_EQ(arg->strides.size(), arg->shape.size()); + ICHECK_EQ(value->strides.size(), value->shape.size()); for (size_t i = 0; i < arg->strides.size(); ++i) { std::ostringstream os; os << arg_name << ".strides[" << i << "]"; diff --git a/src/tir/transforms/bf16_legalize.cc b/src/tir/transforms/bf16_legalize.cc index 97c96edc6ca7..7a8789457923 100644 --- a/src/tir/transforms/bf16_legalize.cc +++ b/src/tir/transforms/bf16_legalize.cc @@ -50,10 +50,10 @@ class BF16PromoteRewriter : public StmtExprMutator { auto b = this->VisitExpr(orig_b); *is_bfloat16 = false; if (a->dtype.is_bfloat16()) { - CHECK(b->dtype.is_bfloat16()); + ICHECK(b->dtype.is_bfloat16()); *is_bfloat16 = true; } else if (b->dtype.is_bfloat16()) { - CHECK(a->dtype.is_bfloat16()); + ICHECK(a->dtype.is_bfloat16()); *is_bfloat16 = true; } @@ -182,14 +182,14 @@ class BF16LowerRewriter : public StmtExprMutator { auto op_val = StmtExprMutator::VisitExpr(op->value); if (op->value->dtype.is_bfloat16()) { // if is cast_from_bf16, check if is to fp32 - CHECK(op->dtype.is_float() && op->dtype.bits() == 32); + ICHECK(op->dtype.is_float() && op->dtype.bits() == 32); auto uint32_dtype = DataType(kDLUInt, 32, op_val->dtype.lanes()); auto uint32_v = Cast(uint32_dtype, op_val); // to be endian invariant. 
return Call(op->dtype, builtin::reinterpret(), {uint32_v << 16}); } else if (op->dtype.is_bfloat16()) { // if is cast_to_bf16, check if op->value is fp32 - CHECK(op->value->dtype.is_float() && op->value->dtype.bits() == 32); + ICHECK(op->value->dtype.is_float() && op->value->dtype.bits() == 32); auto uint32_dtype = DataType(kDLUInt, 32, op_val->dtype.lanes()); auto uint32_v = Call(uint32_dtype, builtin::reinterpret(), {op_val}); auto uint16_dtype = DataType(kDLUInt, 16, op_val->dtype.lanes()); @@ -299,7 +299,7 @@ class BF16LowerRewriter : public StmtExprMutator { if (op->dtype.is_bfloat16()) { auto it = var_remap_.find(op->buffer_var); - CHECK(it != var_remap_.end()) << "bfloat* var needs to be remapped"; + ICHECK(it != var_remap_.end()) << "bfloat* var needs to be remapped"; return Load(DataType::UInt(16, op->dtype.lanes()), it->second, op->index, op->predicate); } else { return ret; diff --git a/src/tir/transforms/combine_context_call.cc b/src/tir/transforms/combine_context_call.cc index 0485bb1f7613..03a0d5e751cf 100644 --- a/src/tir/transforms/combine_context_call.cc +++ b/src/tir/transforms/combine_context_call.cc @@ -42,13 +42,13 @@ class ContextCallCombiner final : public StmtExprMutator { public: PrimExpr VisitExpr_(const CallNode* op) final { if (op->op.same_as(builtin::tvm_thread_context())) { - CHECK_EQ(op->args.size(), 1U); + ICHECK_EQ(op->args.size(), 1U); PrimExpr ctx = op->args[0]; auto it = ctx_map_.find(ctx); if (it != ctx_map_.end()) { return it->second; } else { - CHECK(ctx.dtype().is_handle()); + ICHECK(ctx.dtype().is_handle()); Var ctx_var("ctx_cache_", ctx.dtype()); ctx_map_[ctx] = ctx_var; return std::move(ctx_var); diff --git a/src/tir/transforms/coproc_sync.cc b/src/tir/transforms/coproc_sync.cc index 9de9eaa8a639..f9245442d268 100644 --- a/src/tir/transforms/coproc_sync.cc +++ b/src/tir/transforms/coproc_sync.cc @@ -149,7 +149,7 @@ class CoProcSyncPlanner : public StorageAccessVisitor { } } if (sync_write) { - CHECK_NE(i, 0U); + ICHECK_NE(i, 0U); sync_[seq[i - 1].stmt] = GetSync(co_access); co_access.clear(); contain_sync = true; @@ -175,7 +175,7 @@ class CoProcSyncPlanner : public StorageAccessVisitor { } } if (sync_at_end && co_access.size() != 0) { - CHECK_NE(seq.size(), 0); + ICHECK_NE(seq.size(), 0); contain_sync = true; sync_[seq.back().stmt] = GetSync(co_access); co_access.clear(); @@ -190,8 +190,8 @@ class CoProcSyncPlanner : public StorageAccessVisitor { // Add write Synchronization std::vector GetSync(const std::vector& co_access) { // Does not consider memory coherence, need runtime. 
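Reviewer context for the bf16_legalize.cc hunks above: the pass converts between bfloat16 and fp32 purely through integer reinterpretation, so the ICHECKs guard real bit-level assumptions (the fp32 side must be exactly 32 bits, both operands of a promoted binop must be bf16). A minimal standalone sketch of the two casts, assuming plain truncation on narrowing (the pass itself may round differently):

```cpp
#include <cstdint>
#include <cstring>

// fp32 -> bf16: reinterpret the float as uint32 and keep the high 16 bits.
// This mirrors the reinterpret+shift pattern in the pass, with truncation.
uint16_t FloatToBF16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // reinterpret, not convert
  return static_cast<uint16_t>(bits >> 16);
}

// bf16 -> fp32: widen to uint32, shift the payload into the high half, and
// reinterpret. Going through integer types keeps the conversion endian-safe.
float BF16ToFloat(uint16_t h) {
  uint32_t bits = static_cast<uint32_t>(h) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}
```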
- CHECK_NE(co_access.size(), 0U); - CHECK_EQ(co_access[0].threads.size(), 1U); + ICHECK_NE(co_access.size(), 0U); + ICHECK_EQ(co_access[0].threads.size(), 1U); return GetSync(coproc_name_ + ".coproc_sync"); } @@ -250,7 +250,7 @@ class CoProcBarrierDetector : public StorageAccessVisitor { auto fupdate = [&](size_t i, const AccessEntry& acc) { auto it = write_set.find(acc.buffer.get()); if (it != write_set.end()) { - CHECK_NE(i, 0U); + ICHECK_NE(i, 0U); barrier_after_[seq[i - 1].stmt].push_back(MakeBarrier(write_barrier_name_, it->second)); write_set.erase(it); } @@ -288,7 +288,7 @@ class CoProcBarrierDetector : public StorageAccessVisitor { auto fupdate = [&](size_t i, const AccessEntry& acc) { auto it = read_set.find(acc.buffer.get()); if (it != read_set.end()) { - CHECK_NE(i, seq.size()); + ICHECK_NE(i, seq.size()); barrier_before_[seq[i].stmt].push_back(MakeBarrier(read_barrier_name_, it->second)); read_set.erase(it); } @@ -324,12 +324,12 @@ class CoProcBarrierDetector : public StorageAccessVisitor { // insert write point Array wset; for (const AccessEntry& acc : wvec) { - CHECK(acc.dtype == wvec[0].dtype); + ICHECK(acc.dtype == wvec[0].dtype); wset.push_back(acc.touched); } Range none; Range r = arith::Union(wset).CoverRange(none); - CHECK(r.defined()) << "Cannot deduce write range of " << wvec[0].buffer; + ICHECK(r.defined()) << "Cannot deduce write range of " << wvec[0].buffer; PrimExpr min = r->min; PrimExpr extent = r->extent; return Evaluate(Call(DataType::Int(32), Op::Get(func), @@ -361,7 +361,7 @@ class CoProcInstDepDetector : public StmtVisitor { void VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::coproc_scope && op->node.same_as(coproc_axis_)) { const IntImmNode* ctx_id = op->value.as(); - CHECK(ctx_id != nullptr); + ICHECK(ctx_id != nullptr); curr_state_.clear(); curr_state_.node = op->body.get(); curr_state_.enter_ctx.insert(ctx_id->value); @@ -380,7 +380,7 @@ class CoProcInstDepDetector : public StmtVisitor { curr_state_.clear(); if (last_state_.node != nullptr) { curr_state_.node = op; - CHECK(first_state_.node != nullptr); + ICHECK(first_state_.node != nullptr); // loop carry dependency InjectSync(last_state_, first_state_, &(curr_state_.exit_push), &(curr_state_.enter_pop)); curr_state_.enter_ctx = first_state_.enter_ctx; @@ -548,7 +548,7 @@ class CoProcInstDepDetector : public StmtVisitor { InjectSync(last_state_, curr_state_, &t1, &t2); std::swap(last_state_, curr_state_); } else { - CHECK(first_state_.node == nullptr); + ICHECK(first_state_.node == nullptr); first_state_ = curr_state_; last_state_ = curr_state_; } @@ -582,7 +582,7 @@ class CoProcSyncInserter : public StmtMutator { touched.insert(kv.first); } } - CHECK_EQ(visitor.coproc_.size(), 1U); + ICHECK_EQ(visitor.coproc_.size(), 1U); std::string coproc_name = (*visitor.coproc_.begin())->var->name_hint; // plan sync. 
CoProcSyncPlanner sync_planner(touched, coproc_name); diff --git a/src/tir/transforms/hoist_if_then_else.cc b/src/tir/transforms/hoist_if_then_else.cc index 9db800c2a6d2..7bae0ce8ca75 100644 --- a/src/tir/transforms/hoist_if_then_else.cc +++ b/src/tir/transforms/hoist_if_then_else.cc @@ -248,7 +248,7 @@ class HoistCandidateSelector final : public StmtExprVisitor { private: void ResetRecorderInternal() { if (is_recorder_on_) { - CHECK_GT(ordered_list_.size(), 0); + ICHECK_GT(ordered_list_.size(), 0); is_recorder_on_ = false; } ordered_list_.clear(); diff --git a/src/tir/transforms/inject_copy_intrin.cc b/src/tir/transforms/inject_copy_intrin.cc index b27459f4bd45..f7443c74c0f7 100644 --- a/src/tir/transforms/inject_copy_intrin.cc +++ b/src/tir/transforms/inject_copy_intrin.cc @@ -47,7 +47,7 @@ class CopyIntrinInjector : public StmtMutator { storage_scope_[buf] = op->value.as()->value; } else if (op->attr_key == pragma_key_) { Stmt ret; - CHECK(MatchCopyPattern(op->body, &ret)) << "Cannot match copy pattern of " << op->body; + ICHECK(MatchCopyPattern(op->body, &ret)) << "Cannot match copy pattern of " << op->body; return ret; } return StmtMutator::VisitStmt_(op); @@ -76,7 +76,7 @@ class CopyIntrinInjector : public StmtMutator { const CastNode* cast = store->value.as(); const LoadNode* load = store->value.as(); if (0 == loops.size()) { - CHECK(!has_cond); + ICHECK(!has_cond); } // for now only support true condition matching if (has_cond) { @@ -112,8 +112,8 @@ class CopyIntrinInjector : public StmtMutator { Array clip_bound = arith::DetectClipBound(sel_cond.Eval(), loop_vars); pad_value = sel_false_value.Eval(); if (clip_bound.size() == 0) return false; - CHECK_EQ(src_shape.size(), loop_vars.size()); - CHECK_EQ(clip_bound.size(), loop_vars.size() * 2); + ICHECK_EQ(src_shape.size(), loop_vars.size()); + ICHECK_EQ(clip_bound.size(), loop_vars.size() * 2); for (size_t i = 0; i < src_shape.size(); ++i) { PrimExpr min_value = clip_bound[2 * i]; PrimExpr max_value = clip_bound[2 * i + 1]; @@ -139,8 +139,8 @@ class CopyIntrinInjector : public StmtMutator { } src_elem_offset = analyzer_.Simplify(src_elem_offset); } - CHECK_EQ(load_strides.size(), store_strides.size()); - CHECK_EQ(load_strides.size(), loop_var_size + 1); + ICHECK_EQ(load_strides.size(), store_strides.size()); + ICHECK_EQ(load_strides.size(), loop_var_size + 1); Array src_strides(load_strides.begin(), load_strides.begin() + loop_var_size); Array dst_strides(store_strides.begin(), store_strides.begin() + loop_var_size); if (loop_var_size == 0) { @@ -154,7 +154,7 @@ class CopyIntrinInjector : public StmtMutator { load->buffer_var->name_hint, GetStorageScope(load->buffer_var.get()), 0, 0, kDefault); *out = flower_copy_fromto_(src, dst, pad_before, pad_after, pad_value); - CHECK(out->defined()) << "flower function did not return correct stmt"; + ICHECK(out->defined()) << "flower function did not return correct stmt"; return true; } // Get storage scope diff --git a/src/tir/transforms/inject_double_buffer.cc b/src/tir/transforms/inject_double_buffer.cc index 1eea43d27d46..8de446727a71 100644 --- a/src/tir/transforms/inject_double_buffer.cc +++ b/src/tir/transforms/inject_double_buffer.cc @@ -123,7 +123,7 @@ class DoubleBufferInjector : public StmtExprMutator { for (PrimExpr e : op->extents) { new_extents.push_back(e); } - CHECK(it->second.loop != nullptr); + ICHECK(it->second.loop != nullptr); auto& alloc_nest = loop_allocs_[it->second.loop]; alloc_nest.emplace_back( AttrStmt(op->buffer_var, attr::storage_scope, StringImm(it->second.scope), 
Evaluate(0))); @@ -143,9 +143,9 @@ class DoubleBufferInjector : public StmtExprMutator { const ForNode* old_loop = stmt.as(); if (split_loop_ != 0) { // Explicitly unroll the loop - CHECK(split_loop_ % 2 == 0 || split_loop_ == 1) + ICHECK(split_loop_ % 2 == 0 || split_loop_ == 1) << "It is better to split with multiple of 2"; - CHECK(is_zero(old_loop->min)); + ICHECK(is_zero(old_loop->min)); PrimExpr zero = old_loop->min; PrimExpr new_ext = old_loop->extent - make_const(old_loop->loop_var.dtype(), 1); PrimExpr factor = make_const(new_ext.dtype(), split_loop_); @@ -186,8 +186,8 @@ class DoubleBufferInjector : public StmtExprMutator { auto it = dbuffer_info_.find(op->buffer_var.get()); if (it != dbuffer_info_.end()) { const StorageEntry& e = it->second; - CHECK(in_double_buffer_scope_); - CHECK(e.stride.defined()); + ICHECK(in_double_buffer_scope_); + ICHECK(e.stride.defined()); return Store(op->buffer_var, op->value, e.switch_write_var * e.stride + op->index, op->predicate); } else { @@ -201,8 +201,8 @@ class DoubleBufferInjector : public StmtExprMutator { auto it = dbuffer_info_.find(op->buffer_var.get()); if (it != dbuffer_info_.end()) { const StorageEntry& e = it->second; - CHECK(e.stride.defined()); - CHECK(e.switch_read_var.defined()); + ICHECK(e.stride.defined()); + ICHECK(e.switch_read_var.defined()); return Load(op->dtype, op->buffer_var, e.switch_read_var * e.stride + op->index, op->predicate); } else { @@ -211,14 +211,14 @@ class DoubleBufferInjector : public StmtExprMutator { } PrimExpr VisitExpr_(const VarNode* op) final { - CHECK(!dbuffer_info_.count(op)); + ICHECK(!dbuffer_info_.count(op)); return GetRef(op); } private: Stmt MakeProducer(const AttrStmtNode* op) { const Var buffer = Downcast(op->node); - CHECK_NE(loop_nest_.size(), 0U) << "Double buffer scope must be inside a loop"; + ICHECK_NE(loop_nest_.size(), 0U) << "Double buffer scope must be inside a loop"; auto it = dbuffer_info_.find(buffer.get()); if (it == dbuffer_info_.end()) { LOG(WARNING) << "Skip double buffer scope " << op->node; diff --git a/src/tir/transforms/inject_prefetch.cc b/src/tir/transforms/inject_prefetch.cc index 4e4f33baed2b..b5c4cf5ec582 100644 --- a/src/tir/transforms/inject_prefetch.cc +++ b/src/tir/transforms/inject_prefetch.cc @@ -44,7 +44,7 @@ class PrefetchInjector : public StmtMutator { op = ret.as(); if (op && op->attr_key == attr::prefetch_scope) { Buffer buffer = Downcast(op->node); - CHECK_NE(loop_nest_.size(), 0U); + ICHECK_NE(loop_nest_.size(), 0U); Region domain = DomainTouched(op->body, buffer, true, false); Region region; diff --git a/src/tir/transforms/inject_virtual_thread.cc b/src/tir/transforms/inject_virtual_thread.cc index c0a0b08f22a0..9a77449ecfa2 100644 --- a/src/tir/transforms/inject_virtual_thread.cc +++ b/src/tir/transforms/inject_virtual_thread.cc @@ -58,8 +58,8 @@ class ExprTouched final : public StmtExprVisitor { if (op->op.same_as(builtin::tvm_access_ptr())) { const auto* rw_mask = op->args[4].as(); const VarNode* buffer_var = op->args[1].as(); - CHECK(buffer_var); - CHECK(rw_mask); + ICHECK(buffer_var); + ICHECK(rw_mask); // read if (rw_mask->value & 1) { HandleUseVar(buffer_var); @@ -182,7 +182,7 @@ class VTInjector : public StmtExprMutator { allow_share_(allow_share) {} // Inject VTLoop when needed. 
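For the inject_double_buffer.cc hunks above: the ICHECKs pin down the invariants the rewrite relies on (an enclosing loop exists, the stride is known, reads and writes go through the switch variables). A scalar sketch of the access pattern the pass creates; the function name and the "* 2" compute step are illustrative only:

```cpp
#include <vector>

// Ping-pong buffering as InjectDoubleBuffer arranges it: the allocation is
// doubled and iteration i writes half (i % 2) while the consumer reads the
// half filled by iteration i - 1. dst must hold src.size() elements.
void DoubleBufferedPipeline(const std::vector<int>& src, std::vector<int>& dst,
                            int stride) {
  const int n = static_cast<int>(src.size()) / stride;
  std::vector<int> buf(2 * stride);                  // doubled allocation
  for (int j = 0; j < stride; ++j) buf[j] = src[j];  // prologue: fill half 0
  for (int i = 1; i < n; ++i) {
    const int wr = (i % 2) * stride;        // like switch_write_var * stride
    const int rd = ((i - 1) % 2) * stride;  // like switch_read_var * stride
    for (int j = 0; j < stride; ++j) buf[wr + j] = src[i * stride + j];
    for (int j = 0; j < stride; ++j) dst[(i - 1) * stride + j] = buf[rd + j] * 2;
  }
  const int rd = ((n - 1) % 2) * stride;  // epilogue: drain the last half
  for (int j = 0; j < stride; ++j) dst[(n - 1) * stride + j] = buf[rd + j] * 2;
}
```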
Stmt VisitStmt(const Stmt& s) final { - CHECK(!visit_touched_var_); + ICHECK(!visit_touched_var_); auto stmt = StmtExprMutator::VisitStmt(s); if (visit_touched_var_ || trigger_base_inject_) { if (!vt_loop_injected_) { @@ -195,7 +195,7 @@ class VTInjector : public StmtExprMutator { } // Variable PrimExpr VisitExpr_(const VarNode* op) final { - CHECK(!alloc_remap_.count(op)) << "Buffer address may get rewritten in virtual thread"; + ICHECK(!alloc_remap_.count(op)) << "Buffer address may get rewritten in virtual thread"; if (touched_var_.count(op)) { visit_touched_var_ = true; } @@ -221,7 +221,7 @@ class VTInjector : public StmtExprMutator { // Expression. PrimExpr VisitExpr_(const CallNode* op) final { if (op->op.same_as(builtin::tvm_access_ptr())) { - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); DataType dtype = op->args[0].dtype(); const VarNode* buffer = op->args[1].as(); auto it = alloc_remap_.find(buffer); @@ -290,7 +290,7 @@ class VTInjector : public StmtExprMutator { } // For Stmt VisitStmt_(const ForNode* op) final { - CHECK(is_zero(op->min)); + ICHECK(is_zero(op->min)); PrimExpr extent = this->VisitExpr(op->extent); if (visit_touched_var_ && !vt_loop_injected_) { Stmt stmt = InjectVTLoop(GetRef(op), true); @@ -313,7 +313,7 @@ class VTInjector : public StmtExprMutator { return InjectVTLoop(GetRef(op), true); } visit_touched_var_ = false; - CHECK_EQ(max_loop_depth_, 0); + ICHECK_EQ(max_loop_depth_, 0); Stmt then_case = this->VisitStmt(op->then_case); Stmt else_case; if (op->else_case.defined()) { @@ -332,7 +332,7 @@ class VTInjector : public StmtExprMutator { // Seq Stmt VisitStmt_(const SeqStmtNode* op) final { - CHECK_EQ(max_loop_depth_, 0); + ICHECK_EQ(max_loop_depth_, 0); auto fmutate = [this](const Stmt& s) { int temp = max_loop_depth_; max_loop_depth_ = 0; @@ -392,7 +392,7 @@ class VTInjector : public StmtExprMutator { // inject vthread loop Stmt InjectVTLoop(Stmt stmt, bool before_mutation) { - CHECK(!vt_loop_injected_); + ICHECK(!vt_loop_injected_); // reset the flags visit_touched_var_ = false; trigger_base_inject_ = false; diff --git a/src/tir/transforms/ir_utils.cc b/src/tir/transforms/ir_utils.cc index d6c7300f2edb..838194203b5b 100644 --- a/src/tir/transforms/ir_utils.cc +++ b/src/tir/transforms/ir_utils.cc @@ -38,38 +38,38 @@ Stmt MergeNest(const std::vector& nest, Stmt body) { Stmt s = *ri; if (const auto* for_ = s.as()) { auto n = make_object(*for_); - CHECK(is_no_op(n->body)); + ICHECK(is_no_op(n->body)); n->body = body; body = Stmt(n); } else if (const auto* let = s.as()) { auto n = make_object(*let); - CHECK(is_no_op(n->body)); + ICHECK(is_no_op(n->body)); n->body = body; body = Stmt(n); } else if (const auto* attr = s.as()) { auto n = make_object(*attr); - CHECK(is_no_op(n->body)); + ICHECK(is_no_op(n->body)); n->body = body; body = Stmt(n); } else if (const auto* ite = s.as()) { auto n = make_object(*ite); - CHECK(is_no_op(n->then_case)); - CHECK(!n->else_case.defined()); + ICHECK(is_no_op(n->then_case)); + ICHECK(!n->else_case.defined()); n->then_case = body; body = Stmt(n); } else if (const auto* seq = s.as()) { auto n = make_object(*seq); - CHECK(n->size() != 0 && is_no_op(n->seq[n->size() - 1])); + ICHECK(n->size() != 0 && is_no_op(n->seq[n->size() - 1])); n->seq.Set(n->size() - 1, body); body = Stmt(n); } else if (const auto* assert_ = s.as()) { auto n = make_object(*assert_); - CHECK(is_no_op(n->body)); + ICHECK(is_no_op(n->body)); n->body = body; body = Stmt(n); } else if (const auto* alloc = s.as()) { auto n = make_object(*alloc); - 
CHECK(is_no_op(n->body)); + ICHECK(is_no_op(n->body)); n->body = body; body = Stmt(n); } else { @@ -177,7 +177,7 @@ class IRConvertSSA final : public StmtExprMutator { Stmt new_alloc = this->VisitStmt(op->body); if (new_alloc.same_as(op->body)) return GetRef(op); alloc = new_alloc.as(); - CHECK(alloc); + ICHECK(alloc); return AttrStmt(alloc->buffer_var, op->attr_key, op->value, new_alloc); } } diff --git a/src/tir/transforms/ir_utils.h b/src/tir/transforms/ir_utils.h index eb7a246957d2..3b4e693b820a 100644 --- a/src/tir/transforms/ir_utils.h +++ b/src/tir/transforms/ir_utils.h @@ -138,9 +138,9 @@ inline Stmt TVMStructSet(Var handle, int index, builtin::TVMStructFieldKind kind */ inline DataType APIType(DataType t) { if (t.is_handle()) return t; - CHECK_EQ(t.lanes(), 1) << "Cannot pass vector type through packed API."; + ICHECK_EQ(t.lanes(), 1) << "Cannot pass vector type through packed API."; if (t.is_uint() || t.is_int()) return DataType::Int(64); - CHECK(t.is_float()); + ICHECK(t.is_float()); return DataType::Float(64); } diff --git a/src/tir/transforms/lift_attr_scope.cc b/src/tir/transforms/lift_attr_scope.cc index 44b121a7b559..27dd583b8b42 100644 --- a/src/tir/transforms/lift_attr_scope.cc +++ b/src/tir/transforms/lift_attr_scope.cc @@ -88,7 +88,7 @@ class AttrScopeLifter : public StmtMutator { if (attr_node.size() == 0) return ret; op = ret.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); Array reorg; // check if all decorations are common. for (size_t begin = 0; begin < attr_node.size();) { diff --git a/src/tir/transforms/loop_partition.cc b/src/tir/transforms/loop_partition.cc index 68c43fac1170..ab567dc0e417 100644 --- a/src/tir/transforms/loop_partition.cc +++ b/src/tir/transforms/loop_partition.cc @@ -121,7 +121,7 @@ class CandidateSelector final : public StmtExprVisitor { void VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::thread_extent) { const IterVarNode* iv = op->node.as(); - CHECK(iv); + ICHECK(iv); Var var = iv->var; runtime::ThreadScope scope = runtime::ThreadScope::Create(iv->thread_tag); if ((scope.rank == 0) && (!is_const_int(op->value) || partition_const_loop_)) { @@ -210,7 +210,7 @@ class PartitionFinder : public StmtExprVisitor { // handle thread_axis if (op->attr_key == attr::thread_extent) { const IterVarNode* thread_axis = op->node.as(); - CHECK(thread_axis); + ICHECK(thread_axis); const VarNode* var = thread_axis->var.get(); IntSet dom = IntSet::FromRange(Range(make_zero(op->value.dtype()), op->value)); hint_map_.insert({var, dom}); @@ -363,7 +363,7 @@ class LoopPartitioner : public StmtMutator { } const IterVarNode* iv = op->node.as(); - CHECK(iv); + ICHECK(iv); Var var = iv->var; auto as = GetRef(op); if (selector.candidates.count(as)) { @@ -595,7 +595,7 @@ Stmt LoopPartitioner::TryPartition(const Stmt& stmt, Var var, PrimExpr min, Prim inline Stmt LoopPartitioner::MakeFor(const Object* node, PrimExpr extent, Stmt body) { const ForNode* for_node = static_cast(node); - CHECK(for_node); + ICHECK(for_node); if (analyzer_.CanProve(extent == make_const(DataType::Int(32), 1))) { // If the loop extent is 1, do not create the loop anymore return Substitute(body, {{Var{for_node->loop_var}, make_const(DataType::Int(32), 0)}}); @@ -609,7 +609,7 @@ class RemoveLikelyTags : public StmtExprMutator { public: PrimExpr VisitExpr_(const CallNode* op) final { if (op->op.same_as(builtin::likely())) { - CHECK_EQ(op->args.size(), 1); + ICHECK_EQ(op->args.size(), 1); return StmtExprMutator::VisitExpr(op->args[0]); } else { return 
StmtExprMutator::VisitExpr_(op); diff --git a/src/tir/transforms/lower_custom_datatypes.cc b/src/tir/transforms/lower_custom_datatypes.cc index a0faa17fbcc3..a3e5a920a0b2 100644 --- a/src/tir/transforms/lower_custom_datatypes.cc +++ b/src/tir/transforms/lower_custom_datatypes.cc @@ -53,9 +53,9 @@ class CustomDatatypesLowerer : public StmtExprMutator { PrimExpr expr = StmtExprMutator::VisitExpr_(op); if (toBeLowered) { auto lower = datatype::GetCastLowerFunc(target_, type_code, src_type_code); - CHECK(lower) << "Cast lowering function for target " << target_ << " destination type " - << static_cast(type_code) << " source type " - << static_cast(src_type_code) << " not found"; + ICHECK(lower) << "Cast lowering function for target " << target_ << " destination type " + << static_cast(type_code) << " source type " + << static_cast(src_type_code) << " not found"; return (*lower)(expr); } return expr; @@ -66,8 +66,8 @@ class CustomDatatypesLowerer : public StmtExprMutator { auto e = GetRef(imm); if (datatype::Registry::Global()->GetTypeRegistered(type_code)) { auto lower = datatype::GetFloatImmLowerFunc(target_, type_code); - CHECK(lower) << "FloatImm lowering function for target " << target_ << " type " - << static_cast(type_code) << " not found"; + ICHECK(lower) << "FloatImm lowering function for target " << target_ << " type " + << static_cast(type_code) << " not found"; return (*lower)(e); } return e; @@ -103,11 +103,11 @@ class CustomDatatypesLowerer : public StmtExprMutator { call = expr.as(); if (toBeLowered) { auto op = call->op.as(); - CHECK(op != nullptr) << "Lowering non-intrinsic Calls not implemented"; + ICHECK(op != nullptr) << "Lowering non-intrinsic Calls not implemented"; auto lower = datatype::GetIntrinLowerFunc(target_, op->name, call->dtype.code()); - CHECK(lower) << "Intrinsic lowering function for target " << target_ << ", intrinsic name " - << op->name << ", type " << static_cast(call->dtype.code()) - << " not found"; + ICHECK(lower) << "Intrinsic lowering function for target " << target_ << ", intrinsic name " + << op->name << ", type " << static_cast(call->dtype.code()) + << " not found"; return (*lower)(expr); } return expr; @@ -121,8 +121,8 @@ class CustomDatatypesLowerer : public StmtExprMutator { op = expr.as(); \ if (toBeLowered) { \ auto lower = datatype::Get##OP##LowerFunc(target_, type_code); \ - CHECK(lower) << #OP " lowering function for target " << target_ << " type " \ - << static_cast(type_code) << " not found"; \ + ICHECK(lower) << #OP " lowering function for target " << target_ << " type " \ + << static_cast(type_code) << " not found"; \ return (*lower)(expr); \ } \ return expr; \ @@ -153,7 +153,7 @@ Pass LowerCustomDatatypes() { auto pass_func = [](PrimFunc f, IRModule m, PassContext ctx) { auto* n = f.CopyOnWrite(); auto target = f->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "LowerCustomDatatypes: Require the target attribute"; + ICHECK(target.defined()) << "LowerCustomDatatypes: Require the target attribute"; n->body = CustomDatatypesLowerer(target.value()->kind->name)(std::move(n->body)); return f; diff --git a/src/tir/transforms/lower_device_storage_access_info.cc b/src/tir/transforms/lower_device_storage_access_info.cc index 3b317e3f9968..829b7d822d11 100644 --- a/src/tir/transforms/lower_device_storage_access_info.cc +++ b/src/tir/transforms/lower_device_storage_access_info.cc @@ -49,7 +49,7 @@ class StorageAccessInfoLower : public StmtExprMutator { if (it != storage_info_.end() && it->second.info.defined()) { const MemoryInfo& info = 
it->second.info; ++it->second.alloc_count; - CHECK_LE(it->second.alloc_count, 1) + ICHECK_LE(it->second.alloc_count, 1) << "Double allocation of " << it->second.scope.to_string(); if (info->head_address.defined()) { @@ -69,7 +69,7 @@ class StorageAccessInfoLower : public StmtExprMutator { e.scope = scope; if (scope.tag.length() != 0) { e.info = GetMemoryInfo(op->value.as()->value); - CHECK(e.info.defined()) << "Cannot find memory info of " << scope.to_string(); + ICHECK(e.info.defined()) << "Cannot find memory info of " << scope.to_string(); } storage_info_[buf] = e; return StmtExprMutator::VisitStmt_(op); @@ -93,7 +93,7 @@ class StorageAccessInfoLower : public StmtExprMutator { // Specially handle the buffer packed intrinsic PrimExpr expr = StmtExprMutator::VisitExpr_(op); op = expr.as(); - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); DataType dtype = op->args[0].dtype(); const VarNode* buffer = op->args[1].as(); Var buffer_var = Downcast(op->args[1]); @@ -102,7 +102,7 @@ class StorageAccessInfoLower : public StmtExprMutator { if (it != storage_info_.end() && it->second.info.defined()) { return MakeTaggedAccessPtr(op->dtype, buffer_var, dtype, offset, it->second.info); } - CHECK(op->dtype.is_handle()); + ICHECK(op->dtype.is_handle()); // Change to address_of return AddressOffset(buffer_var, dtype, offset); } @@ -110,11 +110,11 @@ class StorageAccessInfoLower : public StmtExprMutator { PrimExpr MakeTaggedAccessPtr(DataType ptr_type, Var buffer_var, DataType dtype, PrimExpr offset, const MemoryInfo& info) { if (ptr_type.is_handle()) { - CHECK(info->head_address.defined()) << buffer_var << " is not adddressable."; + ICHECK(info->head_address.defined()) << buffer_var << " is not addressable."; return AddressOffset(buffer_var, dtype, offset); } int dtype_bits = dtype.bits() * dtype.lanes(); - CHECK_EQ(info->unit_bits % dtype_bits, 0); + ICHECK_EQ(info->unit_bits % dtype_bits, 0); return cast(ptr_type, analyzer_.Simplify( offset / make_const(offset.dtype(), info->unit_bits / dtype_bits))); } diff --git a/src/tir/transforms/lower_intrin.cc b/src/tir/transforms/lower_intrin.cc index 8774fc37125f..cd7c10ffa688 100644 --- a/src/tir/transforms/lower_intrin.cc +++ b/src/tir/transforms/lower_intrin.cc @@ -86,7 +86,7 @@ class IntrinInjecter : public tvm::arith::IRMutatorWithAnalyzer { if (op == nullptr) return ret; int shift; const DataType& dtype = op->dtype; - CHECK(dtype.is_int() || dtype.is_uint()); + ICHECK(dtype.is_int() || dtype.is_uint()); if (support_bitwise_op_ && is_const_power_of_two_integer(op->b, &shift)) { // lower to right shift if possible. @@ -138,7 +138,7 @@ class IntrinInjecter : public tvm::arith::IRMutatorWithAnalyzer { // Lower floordiv to native truncdiv. int shift; const DataType& dtype = op->dtype; - CHECK(dtype.is_int() || dtype.is_uint()); + ICHECK(dtype.is_int() || dtype.is_uint()); if (support_bitwise_op_ && is_const_power_of_two_integer(op->b, &shift)) { // lower to masking if possible. @@ -281,7 +281,7 @@ class IntrinInjecter : public tvm::arith::IRMutatorWithAnalyzer { // if pattern exists. 
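The lower_intrin.cc hunks above sit in the code that strength-reduces floordiv and floormod by a constant power of two into shifts and masks when the target supports bitwise ops. The identity being exploited only holds for floor semantics, not C++'s truncating division; a self-contained check of it (C++20 guarantees arithmetic right shift on signed integers, earlier standards leave it implementation-defined):

```cpp
#include <cassert>
#include <cstdint>

// floordiv/floormod by 2^k via shift and mask. Truncating '/' rounds toward
// zero instead, which is exactly why the pass cannot use it directly.
int64_t FloorDivPow2(int64_t x, int k) { return x >> k; }
int64_t FloorModPow2(int64_t x, int k) { return x & ((int64_t(1) << k) - 1); }

int main() {
  assert(FloorDivPow2(-7, 2) == -2);  // floor(-7/4) = -2, while -7/4 == -1 in C++
  assert(FloorModPow2(-7, 2) == 1);   // -7 == 4 * (-2) + 1
  assert(FloorDivPow2(7, 2) == 1 && FloorModPow2(7, 2) == 3);
  return 0;
}
```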
if (f != nullptr) { PrimExpr r = (*f)(e); - CHECK(r.defined()) << "intrinsic rule must always return valid Expr"; + ICHECK(r.defined()) << "intrinsic rule must always return valid Expr"; if (!r.same_as(e)) { return this->VisitExpr(r); } @@ -307,7 +307,7 @@ Pass LowerIntrin() { auto pass_func = [](PrimFunc f, IRModule m, PassContext ctx) { auto* n = f.CopyOnWrite(); auto target = f->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "LowerIntrin: Require the target attribute"; + ICHECK(target.defined()) << "LowerIntrin: Require the target attribute"; arith::Analyzer analyzer; auto mtriple = target.value()->GetAttr("mtriple", ""); n->body = diff --git a/src/tir/transforms/lower_thread_allreduce.cc b/src/tir/transforms/lower_thread_allreduce.cc index 720c9d0a67e0..c24e26b58db0 100644 --- a/src/tir/transforms/lower_thread_allreduce.cc +++ b/src/tir/transforms/lower_thread_allreduce.cc @@ -59,7 +59,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { } } else if (op->attr_key == attr::reduce_scope) { const CommReducerNode* combiner = op->node.as(); - CHECK(combiner); + ICHECK(combiner); reduce_combiner_.push_back(combiner); Stmt ret = StmtExprMutator::VisitStmt_(op); reduce_combiner_.pop_back(); @@ -101,7 +101,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { PrimExpr VisitExpr_(const LoadNode* op) final { auto it = load_remap_.find(op->buffer_var.get()); if (it != load_remap_.end()) { - CHECK(is_zero(op->index)); + ICHECK(is_zero(op->index)); return it->second; } else { return StmtExprMutator::VisitExpr_(op); @@ -122,13 +122,13 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { // make allreduce. Stmt MakeAllreduce(const CallNode* call) { - CHECK(!reduce_combiner_.empty()); + ICHECK(!reduce_combiner_.empty()); const CommReducerNode* combiner = reduce_combiner_.back(); size_t size = combiner->result.size(); const IntImmNode* size_of_args = call->args[0].as(); - CHECK(size_of_args) << call->args[0]->GetTypeKey(); - CHECK_EQ(size, size_of_args->value); + ICHECK(size_of_args) << call->args[0]->GetTypeKey(); + ICHECK_EQ(size, size_of_args->value); Array inits = combiner->identity_element; std::vector values(size); std::vector types(size); @@ -143,7 +143,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { std::vector buffers(size); for (size_t idx = 0; idx < size; ++idx) { const VarNode* buffer = call->args[2 + size + idx].as(); - CHECK(buffer); + ICHECK(buffer); buffers[idx] = buffer; } @@ -156,7 +156,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { if (v) { reduce_set.insert(v); } else { - CHECK(call->args[i].as() && call->args[i].as()->value == 0) + ICHECK(call->args[i].as() && call->args[i].as()->value == 0) << "arg" << i << " should be a VarNode or IntImmNode"; } } @@ -168,11 +168,11 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { IterVar iv = Downcast(attr->node); e.scope = runtime::ThreadScope::Create(iv->thread_tag); e.iv = iv; - CHECK_LE(e.scope.rank, 1); - CHECK_GE(e.scope.dim_index, 0) << "vthread do not work with cross thread reduction"; + ICHECK_LE(e.scope.rank, 1); + ICHECK_GE(e.scope.dim_index, 0) << "vthread does not work with cross-thread reduction"; if (e.scope.rank == 1) { const auto* ptr = attr->value.as(); - CHECK(ptr) << "Need constant extent for reduce set " << iv; + ICHECK(ptr) << "Need constant extent for reduce set " << iv; e.extent = static_cast(ptr->value); // ignore variables equal to 0 if (e.extent == 1) { @@ -187,7 +187,7 @@ class ThreadAllreduceBuilder final : public 
StmtExprMutator { } } } - CHECK_EQ(nmatch, reduce_set.size()) << "Not all reduce index are presented in the context"; + ICHECK_EQ(nmatch, reduce_set.size()) << "Not all reduce indices are present in the context"; std::sort(vred.begin(), vred.end()); std::sort(vpar.begin(), vpar.end()); // the size of each index. @@ -216,7 +216,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { // if (is_warp_reduction(types)) { // TODO(tvm-team) sub-warp reduction support. - CHECK_EQ(reduce_extent, warp_size_) << "not a warp reduction"; + ICHECK_EQ(reduce_extent, warp_size_) << "not a warp reduction"; // // This is the index to the reduction variable, one reduction // variable per warp. Local scope seems easier to reason without @@ -309,7 +309,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { // Update existing allocations. for (size_t i = 0; i < size; ++i) { - CHECK(!load_remap_.count(buffers[i])); + ICHECK(!load_remap_.count(buffers[i])); PrimExpr pred = const_true(types[i].lanes()); Var var = shared_bufs[i]; load_remap_[buffers[i]] = Load(types[i], var, index, pred); @@ -347,7 +347,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { seq.emplace_back(MakeBufAllreduce(combiner, types, shared_bufs, reduce_index, group_index, reduce_extent, threadx_extent)); for (size_t idx = 0; idx < size; ++idx) { - CHECK(!load_remap_.count(buffers[idx])); + ICHECK(!load_remap_.count(buffers[idx])); PrimExpr pred = const_true(types[idx].lanes()); load_remap_[buffers[idx]] = Load(types[idx], shared_bufs[idx], @@ -380,7 +380,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { while (reduce_extent > reduce_align) { reduce_align = reduce_align << 1; } - CHECK_GT(reduce_align, 1); + ICHECK_GT(reduce_align, 1); std::vector seq; size_t size = shared_bufs.size(); @@ -409,7 +409,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { seq.emplace_back(IfThenElse(cond, freduce(reduce_align))); seq.emplace_back(SyncThread("shared")); } - CHECK(threadx_extent >= 1 && warp_size_ >= 1); + ICHECK(threadx_extent >= 1 && warp_size_ >= 1); // normal synchronization while (reduce_align > threadx_extent || reduce_align > warp_size_) { reduce_align = reduce_align >> 1; @@ -446,7 +446,7 @@ class ThreadAllreduceBuilder final : public StmtExprMutator { if (ret.defined()) { ret = ret + e.iv->var * total_extent; } else { - CHECK_EQ(total_extent, 1); + ICHECK_EQ(total_extent, 1); ret = e.iv->var; } total_extent *= e.extent; @@ -547,7 +547,7 @@ Pass LowerThreadAllreduce() { auto pass_func = [](PrimFunc f, IRModule m, PassContext ctx) { auto* n = f.CopyOnWrite(); auto target = f->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "LowerThreadAllreduce: Require the target attribute"; + ICHECK(target.defined()) << "LowerThreadAllreduce: Require the target attribute"; const TargetNode* target_node = target.as(); n->body = ThreadAllreduceBuilder(target_node)(n->body); return f; diff --git a/src/tir/transforms/lower_tvm_builtin.cc b/src/tir/transforms/lower_tvm_builtin.cc index 39e6640eece6..1d12d57d10b4 100644 --- a/src/tir/transforms/lower_tvm_builtin.cc +++ b/src/tir/transforms/lower_tvm_builtin.cc @@ -35,7 +35,7 @@ namespace tvm { namespace tir { inline PrimExpr ConstInt32(size_t index) { - CHECK_LE(index, std::numeric_limits::max()); + ICHECK_LE(index, std::numeric_limits::max()); return make_const(DataType::Int(32), static_cast(index)); } @@ -70,8 +70,8 @@ class BuiltinLower : public StmtExprMutator { Stmt VisitStmt(const Stmt& s) final { auto stmt = 
StmtExprMutator::VisitStmt(s); - CHECK_EQ(run_shape_stack_, -1); - CHECK_EQ(run_array_stack_, 0); + ICHECK_EQ(run_shape_stack_, -1); + ICHECK_EQ(run_array_stack_, 0); if (prep_seq_.size() != 0) { Stmt ret = SeqStmt::Flatten(prep_seq_, stmt); @@ -102,8 +102,8 @@ class BuiltinLower : public StmtExprMutator { for (size_t i = 0; i < op->extents.size(); ++i) { total_bytes = total_bytes * op->extents[i]; } - CHECK(device_type_.defined()) << "Unknown device type in current IR"; - CHECK(device_id_.defined()) << "Unknown device id in current IR"; + ICHECK(device_type_.defined()) << "Unknown device type in current IR"; + ICHECK(device_id_.defined()) << "Unknown device id in current IR"; Stmt throw_last_error = Evaluate(Call(DataType::Int(32), builtin::tvm_throw_last_error(), {})); Stmt body = SeqStmt({IfThenElse(Call(DataType::Bool(1), builtin::isnullptr(), {op->buffer_var}), @@ -129,11 +129,11 @@ class BuiltinLower : public StmtExprMutator { Stmt VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::device_context_id) { - CHECK(!device_id_.defined()); + ICHECK(!device_id_.defined()); device_id_ = op->value; return this->VisitStmt(op->body); } else if (op->attr_key == attr::device_context_type) { - CHECK(!device_type_.defined()); + ICHECK(!device_type_.defined()); device_type_ = op->value; return this->VisitStmt(op->body); } else { @@ -202,8 +202,8 @@ class BuiltinLower : public StmtExprMutator { } prep_seq_.emplace_back(TVMStructSet(stack_array_, idx, builtin::kArrByteOffset, cast(DataType::UInt(64), byte_offset))); - CHECK(device_type_.defined()) << "Unknown device type in current IR"; - CHECK(device_id_.defined()) << "Unknown device id in current IR"; + ICHECK(device_type_.defined()) << "Unknown device type in current IR"; + ICHECK(device_id_.defined()) << "Unknown device id in current IR"; prep_seq_.emplace_back(TVMStructSet(stack_array_, idx, builtin::kArrDeviceId, cast(DataType::Int(32), device_id_))); prep_seq_.emplace_back(TVMStructSet(stack_array_, idx, builtin::kArrDeviceType, @@ -256,7 +256,7 @@ class BuiltinLower : public StmtExprMutator { size_t arg_stack_begin = run_arg_stack_; run_arg_stack_ += op->args.size(); size_t args_size = op->args.size(); - CHECK_GT(args_size, 0); + ICHECK_GT(args_size, 0); PrimExpr expr = StmtExprMutator::VisitExpr_(op); op = expr.as(); for (size_t i = 1; i < op->args.size(); ++i) { @@ -270,7 +270,7 @@ class BuiltinLower : public StmtExprMutator { prep_seq_.emplace_back(TVMStructSet(stack_value_, static_cast(arg_stack_begin + i - 1), builtin::kTVMValueContent, arg)); int arg_tcode = api_type.code(); - CHECK(!IsArrayHandle(arg)) << "Trace does not support Buffers"; + ICHECK(!IsArrayHandle(arg)) << "Trace does not support Buffers"; prep_seq_.emplace_back( Store(stack_tcode_, ConstInt32(arg_tcode), stack_index, const_true(1))); } diff --git a/src/tir/transforms/lower_warp_memory.cc b/src/tir/transforms/lower_warp_memory.cc index cb6c609ef657..b95681a936ca 100644 --- a/src/tir/transforms/lower_warp_memory.cc +++ b/src/tir/transforms/lower_warp_memory.cc @@ -117,7 +117,7 @@ class WarpStoreCoeffFinder : private StmtVisitor { UpdatePattern(op->index); } else { arith::PVar base; - CHECK(arith::ramp(base, 1, op->value.dtype().lanes()).Match(op->index)) + ICHECK(arith::ramp(base, 1, op->value.dtype().lanes()).Match(op->index)) << "LowerWarpMemory failed due to store index=" << op->index << ", can only handle continuous store"; UpdatePattern(base.Eval()); @@ -129,20 +129,20 @@ class WarpStoreCoeffFinder : private StmtVisitor { void UpdatePattern(const 
PrimExpr& index) { Array m = arith::DetectLinearEquation(index, {warp_index_}); - CHECK_EQ(m.size(), 2U) + ICHECK_EQ(m.size(), 2U) << "LowerWarpMemory failed. Could not simplify the store index `" << index << "` into the form ax + by + cz + ... Warp memory is approximated by storing values in " "thread local registers and shuffling values between these registers. Currently only " "linear equation indices are supported."; PrimExpr mcoeff = analyzer_->canonical_simplify(m[0]); const auto* mcoeff_as_int = mcoeff.as(); - CHECK(mcoeff_as_int && mcoeff_as_int->value > 0) + ICHECK(mcoeff_as_int && mcoeff_as_int->value > 0) << "LowerWarpMemory failed due to store index=" << index << ", require positive constant coefficient on warp index " << warp_index_ << " but get " << mcoeff; if (warp_coeff_ != 0) { - CHECK_EQ(warp_coeff_, mcoeff_as_int->value) + ICHECK_EQ(warp_coeff_, mcoeff_as_int->value) << "LowerWarpMemory failed due to two different store coefficient to warp index"; } else { warp_coeff_ = mcoeff_as_int->value; @@ -166,7 +166,7 @@ class WarpIndexFinder : private StmtVisitor { // find the warp co-efficient and the shuffle width in the statement std::pair Find(const Stmt& stmt) { this->VisitStmt(stmt); - CHECK(warp_index_.defined()) + ICHECK(warp_index_.defined()) << "Cannot find warp index(threadIdx.x) within the scope of warp memory"; return std::make_pair(warp_index_->var, width_); } @@ -178,14 +178,14 @@ class WarpIndexFinder : private StmtVisitor { IterVar iv = Downcast(op->node); if (iv->thread_tag == "threadIdx.x") { auto* value_as_int = op->value.as(); - CHECK(value_as_int && value_as_int->value <= warp_size_ && - warp_size_ % value_as_int->value == 0) + ICHECK(value_as_int && value_as_int->value <= warp_size_ && + warp_size_ % value_as_int->value == 0) << "Expect threadIdx.x 's size to be no larger than, and a factor of" << " warp size(" << warp_size_ << ")" << " to enable warp memory" << " but get " << op->value << " instead"; if (warp_index_.defined()) { - CHECK(warp_index_.same_as(iv)) + ICHECK(warp_index_.same_as(iv)) << "Find two instance of " << warp_index_->thread_tag << " in the same kernel. 
" << "Please create it using thread_axis once and reuse the axis " << "across multiple binds in the same kernel"; @@ -214,7 +214,7 @@ class WarpAccessRewriter : protected StmtExprMutator { Stmt Rewrite(const AllocateNode* op) { buffer_ = op->buffer_var.get(); int alloc_size = op->constant_allocation_size(); - CHECK_GT(alloc_size, 0) << "warp memory only support constant alloc size"; + ICHECK_GT(alloc_size, 0) << "warp memory only support constant alloc size"; alloc_size *= op->dtype.lanes(); std::tie(warp_index_, width_) = WarpIndexFinder(warp_size_).Find(op->body); warp_coeff_ = WarpStoreCoeffFinder(buffer_, warp_index_, analyzer_).Find(op->body); @@ -231,7 +231,7 @@ class WarpAccessRewriter : protected StmtExprMutator { protected: PrimExpr VisitExpr_(const VarNode* op) override { - CHECK(op != buffer_) << "Cannot access address of warp memory directly"; + ICHECK(op != buffer_) << "Cannot access address of warp memory directly"; return StmtExprMutator::VisitExpr_(op); } @@ -250,7 +250,7 @@ class WarpAccessRewriter : protected StmtExprMutator { PrimExpr local_index, group; std::tie(local_index, group) = SplitIndexByGroup(op->index); // invariance: local index must do not contain warp id - CHECK(!ExprUseVar(local_index, warp_index_)) + ICHECK(!ExprUseVar(local_index, warp_index_)) << "LowerWarpMemory failed to rewrite load to shuffle for index " << op->index << " local_index=" << local_index; PrimExpr load_value = Load(op->dtype, op->buffer_var, local_index, op->predicate); @@ -271,7 +271,7 @@ class WarpAccessRewriter : protected StmtExprMutator { PrimExpr local_index, group; arith::PVar base; - CHECK(arith::ramp(base, 1, index.dtype().lanes()).Match(index)); + ICHECK(arith::ramp(base, 1, index.dtype().lanes()).Match(index)); std::tie(local_index, group) = SplitIndexByGroup(base.Eval()); local_index = Ramp(local_index, make_const(local_index.dtype(), 1), index.dtype().lanes()); @@ -326,7 +326,7 @@ class BindVarBoundInfo : public StmtVisitor { void VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); if (!var_dom_.count(iv->var.get())) { Range dom = Range::FromMinExtent(0, op->value); var_dom_[iv->var.get()] = dom; @@ -395,7 +395,7 @@ Pass LowerWarpMemory() { auto pass_func = [](PrimFunc f, IRModule m, PassContext ctx) { auto* n = f.CopyOnWrite(); auto target = f->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "LowerWarpMemory: Require the target attribute"; + ICHECK(target.defined()) << "LowerWarpMemory: Require the target attribute"; int warp_size = target.value()->GetAttr("thread_warp_size", 1).value(); n->body = WarpMemoryRewriter(warp_size).Rewrite(std::move(n->body)); return f; diff --git a/src/tir/transforms/make_packed_api.cc b/src/tir/transforms/make_packed_api.cc index 3cacf52d90d2..7c4a8ef92724 100644 --- a/src/tir/transforms/make_packed_api.cc +++ b/src/tir/transforms/make_packed_api.cc @@ -47,10 +47,10 @@ inline Stmt MakeAssertEQ(PrimExpr lhs, PrimExpr rhs, std::string msg) { PrimFunc MakePackedAPI(PrimFunc&& func, int num_unpacked_args) { auto global_symbol = func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol) << "MakePackedAPI: Expect PrimFunc to have the global_symbol attribute"; + ICHECK(global_symbol) << "MakePackedAPI: Expect PrimFunc to have the global_symbol attribute"; auto target = func->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "MakePackedAPI: 
Require the target attribute"; + ICHECK(target.defined()) << "MakePackedAPI: Require the target attribute"; int target_device_type = target.value()->kind->device_type; std::string name_hint = global_symbol.value(); @@ -58,7 +58,7 @@ PrimFunc MakePackedAPI(PrimFunc&& func, int num_unpacked_args) { auto* func_ptr = func.CopyOnWrite(); const Stmt nop = Evaluate(0); int num_args = static_cast(func_ptr->params.size()); - CHECK_LE(num_unpacked_args, num_args); + ICHECK_LE(num_unpacked_args, num_args); int num_packed_args = num_args - num_unpacked_args; // Data field definitions @@ -143,7 +143,7 @@ PrimFunc MakePackedAPI(PrimFunc&& func, int num_unpacked_args) { msg << name_hint << ": Expect arg[" << i << "] to be int"; seq_check.emplace_back(AssertStmt(tcode == kDLInt, tvm::tir::StringImm(msg.str()), nop)); } else { - CHECK(t.is_float()); + ICHECK(t.is_float()); std::ostringstream msg; msg << name_hint << ": Expect arg[" << i << "] to be float"; seq_check.emplace_back(AssertStmt(tcode == kDLFloat, tvm::tir::StringImm(msg.str()), nop)); @@ -161,7 +161,7 @@ PrimFunc MakePackedAPI(PrimFunc&& func, int num_unpacked_args) { } size_t expected_nargs = num_unpacked_args + (num_packed_args != 0 ? 6 : 0); - CHECK_EQ(args.size(), expected_nargs); + ICHECK_EQ(args.size(), expected_nargs); // Arg definitions are defined before buffer binding to avoid the use before // def errors. diff --git a/src/tir/transforms/narrow_datatype.cc b/src/tir/transforms/narrow_datatype.cc index 4d6aa88ede01..0b248959ec6e 100644 --- a/src/tir/transforms/narrow_datatype.cc +++ b/src/tir/transforms/narrow_datatype.cc @@ -105,7 +105,7 @@ class DataTypeVisitor final : public StmtExprVisitor { void VisitStmt_(const AttrStmtNode* op) { if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); analyzer_.Bind(iv->var, Range::FromMinExtent(0, op->value)); vextent_[iv->var.as()] = op->value.dtype(); StmtExprVisitor::VisitStmt_(op); @@ -216,8 +216,8 @@ class DataTypeRewriter : public StmtExprMutator { Stmt VisitStmt_(const ForNode* op) final { Stmt s = StmtExprMutator::VisitStmt_(op); op = s.as(); - CHECK(op != nullptr) << "Expected type to be ForNode" - << ", but get " << s->GetTypeKey(); + ICHECK(op != nullptr) << "Expected type to be ForNode" + << ", but get " << s->GetTypeKey(); PrimExpr e = VisitExpr(op->loop_var); Var var = Downcast(e); return For(var, cast(var.dtype(), op->min), cast(var.dtype(), op->extent), op->for_type, @@ -228,11 +228,11 @@ class DataTypeRewriter : public StmtExprMutator { if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) { Stmt s = StmtExprMutator::VisitStmt_(op); op = s.as(); - CHECK(op != nullptr) << "Expected type to be AttrStmtNode" - << ", but get " << s->GetTypeKey(); + ICHECK(op != nullptr) << "Expected type to be AttrStmtNode" + << ", but get " << s->GetTypeKey(); const IterVarNode* iv = op->node.as(); - CHECK(iv != nullptr) << "Expected type to be IterVarNode" - << ", but get " << op->node->GetTypeKey(); + ICHECK(iv != nullptr) << "Expected type to be IterVarNode" + << ", but get " << op->node->GetTypeKey(); PrimExpr e = VisitExpr(iv->var); Var var = Downcast(e); if (ivmap_.find(iv) == ivmap_.end()) { @@ -284,8 +284,8 @@ class DataTypeRewriter : public StmtExprMutator { if (is_index_ && visitor_.vmap.find(op) != visitor_.vmap.end()) { PrimExpr e = StmtExprMutator::VisitExpr_(op); const CastNode* new_op = e.as(); - CHECK(new_op != 
nullptr) << "Expected type to be CastNode" - << ", but get " << e->GetTypeKey(); + ICHECK(new_op != nullptr) << "Expected type to be CastNode" + << ", but get " << e->GetTypeKey(); return Cast(visitor_.vmap[op], new_op->value); } return StmtExprMutator::VisitExpr_(op); @@ -353,8 +353,8 @@ DEFINE_BIOP_EXPR_MUTATE_WITH_TYPE_MATCH(GENode, operator>=); PrimExpr DataTypeRewriter::VisitExpr_(const CallNode* op) { PrimExpr e = StmtExprMutator::VisitExpr_(op); op = e.as(); - CHECK(op != nullptr) << "Expected type to be CallNode" - << ", but get " << e->GetTypeKey(); + ICHECK(op != nullptr) << "Expected type to be CallNode" + << ", but get " << e->GetTypeKey(); if (op->op.same_as(builtin::if_then_else())) { return if_then_else(op->args[0], op->args[1], op->args[2]); diff --git a/src/tir/transforms/remap_thread_axis.cc b/src/tir/transforms/remap_thread_axis.cc index 017d1b4e6c67..e101e6b904ce 100644 --- a/src/tir/transforms/remap_thread_axis.cc +++ b/src/tir/transforms/remap_thread_axis.cc @@ -41,7 +41,7 @@ class ThreadAxisRewriter : private StmtExprMutator { Stmt VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::thread_extent) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); auto it = tmap_.find(iv->thread_tag); if (it != tmap_.end()) { const IterVar& new_iv = it->second; @@ -49,7 +49,7 @@ class ThreadAxisRewriter : private StmtExprMutator { if (!vmap_.count(v)) { vmap_[v] = new_iv->var; } else { - CHECK(vmap_[v].same_as(new_iv->var)); + ICHECK(vmap_[v].same_as(new_iv->var)); } Stmt body = this->VisitStmt(op->body); return AttrStmt(new_iv, op->attr_key, op->value, body); @@ -76,7 +76,7 @@ PrimFunc RemapThreadAxis(PrimFunc&& f, Map thread_map) } auto opt_thread_axis = f->GetAttr>(tir::attr::kDeviceThreadAxis); - CHECK(opt_thread_axis != nullptr) << "Require attribute " << tir::attr::kDeviceThreadAxis; + ICHECK(opt_thread_axis != nullptr) << "Require attribute " << tir::attr::kDeviceThreadAxis; auto thread_axis = opt_thread_axis.value(); auto* n = f.CopyOnWrite(); diff --git a/src/tir/transforms/remove_no_op.cc b/src/tir/transforms/remove_no_op.cc index baa1c3c368fd..aae1749b27db 100644 --- a/src/tir/transforms/remove_no_op.cc +++ b/src/tir/transforms/remove_no_op.cc @@ -97,7 +97,7 @@ class NoOpRemover : public StmtMutator { Stmt VisitStmt_(const SeqStmtNode* op) final { Stmt ret = StmtMutator::VisitSeqStmt_(op, true); op = ret.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); bool need_compact = false; for (size_t i = 0; i < op->size(); ++i) { if (is_no_op(op->seq[i])) need_compact = true; diff --git a/src/tir/transforms/split_host_device.cc b/src/tir/transforms/split_host_device.cc index c121285e2314..921c7ad79509 100644 --- a/src/tir/transforms/split_host_device.cc +++ b/src/tir/transforms/split_host_device.cc @@ -43,7 +43,7 @@ class VarUseDefAnalysis : public StmtExprMutator { Stmt VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::thread_extent) { IterVar iv = Downcast(op->node); - CHECK_NE(iv->thread_tag.length(), 0U); + ICHECK_NE(iv->thread_tag.length(), 0U); // thread_extent can appear multiple times // use the first appearance as def. 
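For the narrow_datatype.cc hunks above: the rewriter shrinks index types (typically i64 to i32) when value-range analysis proves it safe, and the ICHECKs guard the node-type downcasts that rewrite performs. The core per-loop decision, sketched with a hypothetical helper (assumes min + extent does not itself overflow int64):

```cpp
#include <cstdint>
#include <limits>

enum class IndexType { kInt32, kInt64 };

// Narrow an i64 loop index to i32 only when both ends of the proven value
// range [min, min + extent - 1] fit; otherwise keep 64-bit arithmetic.
IndexType ChooseIndexType(int64_t min, int64_t extent) {
  const int64_t hi = min + extent - 1;
  if (min >= std::numeric_limits<int32_t>::min() &&
      hi <= std::numeric_limits<int32_t>::max()) {
    return IndexType::kInt32;
  }
  return IndexType::kInt64;
}
```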
if (!use_count_.count(iv->var.get())) { @@ -108,7 +108,7 @@ class VarUseDefAnalysis : public StmtExprMutator { auto it = let_binding_.find(op->var); PrimExpr value = this->VisitExpr(op->value); if (it != let_binding_.end()) { - CHECK(deep_equal_(it->second->value, value)) + ICHECK(deep_equal_(it->second->value, value)) << "Let cannot bind the same var to two different values"; return GetRef(it->second); } else { @@ -147,16 +147,16 @@ class VarUseDefAnalysis : public StmtExprMutator { } void HandleDef(const VarNode* v) { - CHECK(!def_count_.count(v)) << "variable " << v->name_hint - << " has already been defined, the Stmt is not SSA"; - CHECK(!use_count_.count(v)) << "variable " << v->name_hint - << " has been used before definition!"; + ICHECK(!def_count_.count(v)) << "variable " << v->name_hint + << " has already been defined, the Stmt is not SSA"; + ICHECK(!use_count_.count(v)) << "variable " << v->name_hint + << " has been used before definition!"; use_count_[v] = 0; def_count_[v] = 1; } void HandleUse(const PrimExpr& v) { - CHECK(v.as()); + ICHECK(v.as()); Var var = Downcast(v); auto it = use_count_.find(var.get()); if (it != use_count_.end()) { @@ -290,9 +290,9 @@ class HostDeviceSplitter : public StmtMutator { PrimFunc SplitHostDevice(PrimFunc&& func, IRModule* device_mod) { auto target = func->GetAttr(tvm::attr::kTarget); - CHECK(target.defined()) << "SplitHostDevice: Require the target attribute"; + ICHECK(target.defined()) << "SplitHostDevice: Require the target attribute"; auto global_symbol = func->GetAttr(tvm::attr::kGlobalSymbol); - CHECK(global_symbol.defined()) + ICHECK(global_symbol.defined()) << "SplitHostDevice: Expect PrimFunc to have the global_symbol attribute"; HostDeviceSplitter splitter(device_mod, target.value(), @@ -316,7 +316,7 @@ Pass SplitHostDevice() { for (auto& kv : *func_dict) { if (kv.second->IsInstance()) { PrimFunc func = Downcast(std::move(kv.second)); - CHECK(device_mod.defined()) << "The device module must be defined."; + ICHECK(device_mod.defined()) << "The device module must be defined."; kv.second = SplitHostDevice(std::move(func), &device_mod); } } diff --git a/src/tir/transforms/storage_access.cc b/src/tir/transforms/storage_access.cc index 6514a834b397..be20724ae207 100644 --- a/src/tir/transforms/storage_access.cc +++ b/src/tir/transforms/storage_access.cc @@ -37,7 +37,7 @@ void StorageAccessVisitor::VisitExpr_(const LoadNode* op) { const VarNode* buf = op->buffer_var.as(); StorageScope scope = GetScope(buf); if (Enabled(buf, scope)) { - CHECK(allow_append_) << op << " " << scope.to_string(); + ICHECK(allow_append_) << op << " " << scope.to_string(); AccessEntry e; e.threads = env_threads(); e.buffer = op->buffer_var; @@ -53,7 +53,7 @@ void StorageAccessVisitor::VisitExpr_(const LoadNode* op) { void StorageAccessVisitor::VisitStmt_(const StoreNode* op) { allow_append_ = true; - CHECK_EQ(curr_stmt_.access.size(), 0U); + ICHECK_EQ(curr_stmt_.access.size(), 0U); curr_stmt_.stmt = op; const VarNode* buf = op->buffer_var.as(); StorageScope scope = GetScope(buf); @@ -78,7 +78,7 @@ void StorageAccessVisitor::VisitStmt_(const StoreNode* op) { void StorageAccessVisitor::VisitStmt_(const EvaluateNode* op) { allow_append_ = true; - CHECK_EQ(curr_stmt_.access.size(), 0U); + ICHECK_EQ(curr_stmt_.access.size(), 0U); curr_stmt_.stmt = op; StmtExprVisitor::VisitStmt_(op); // push to the scope @@ -95,7 +95,7 @@ void StorageAccessVisitor::VisitStmt_(const AttrStmtNode* op) { storage_scope_[buf] = StorageScope::Create(op->value.as()->value); 
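In the split_host_device.cc hunks above, HandleDef and HandleUse enforce that the body is in SSA form before free variables are lifted into kernel parameters. A map-based sketch of that bookkeeping (simplified; the real visitor walks TIR and tracks VarNode pointers, and the error wording only loosely follows its ICHECK messages):

```cpp
#include <stdexcept>
#include <string>
#include <unordered_map>

// One definition per name, uses counted, use-before-def rejected.
class UseDefCounter {
 public:
  void HandleDef(const std::string& v) {
    if (def_count_.count(v))
      throw std::logic_error(v + " has already been defined; the Stmt is not SSA");
    if (use_count_.count(v))
      throw std::logic_error(v + " has been used before definition");
    use_count_[v] = 0;
    def_count_[v] = 1;
  }
  void HandleUse(const std::string& v) { ++use_count_[v]; }  // undef vars become params
  int uses(const std::string& v) const {
    auto it = use_count_.find(v);
    return it == use_count_.end() ? 0 : it->second;
  }

 private:
  std::unordered_map<std::string, int> use_count_, def_count_;
};
```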
StmtExprVisitor::VisitStmt_(op); } else if (op->attr_key == attr::double_buffer_write) { - CHECK(double_buffer_write_ == nullptr); + ICHECK(double_buffer_write_ == nullptr); double_buffer_write_ = op->node.as(); scope_.push_back(std::vector()); StmtExprVisitor::VisitStmt_(op); @@ -151,7 +151,7 @@ void StorageAccessVisitor::VisitStmt_(const ForNode* op) { arith::IntSet::FromRange(Range::FromMinExtent(op->min, op->extent)); for (AccessEntry& e : s.access) { if (e.buffer.defined()) { - CHECK(e.touched.defined()); + ICHECK(e.touched.defined()); e.touched = arith::EvalSet(e.touched, relax_map); } } @@ -185,7 +185,7 @@ void StorageAccessVisitor::VisitExpr_(const CallNode* op) { const LoadNode* l = op->args[0].as(); StmtExprVisitor::VisitExpr_(l); } else if (op->op.same_as(builtin::tvm_access_ptr())) { - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); DataType dtype = op->args[0].dtype(); const VarNode* buffer = op->args[1].as(); PrimExpr offset = op->args[2]; @@ -194,7 +194,7 @@ void StorageAccessVisitor::VisitExpr_(const CallNode* op) { StorageScope scope = GetScope(buffer); // The buffer scope. if (Enabled(buffer, scope)) { - CHECK(allow_append_); + ICHECK(allow_append_); AccessEntry e; e.threads = env_threads(); e.dtype = dtype; @@ -212,7 +212,7 @@ void StorageAccessVisitor::VisitExpr_(const CallNode* op) { } StmtExprVisitor::VisitExpr_(op); } else if (op->op.same_as(builtin::tvm_storage_sync())) { - CHECK(allow_append_); + ICHECK(allow_append_); const std::string& s = op->args[0].as()->value; if (s != "warp") { StorageScope scope = StorageScope::Create(s); diff --git a/src/tir/transforms/storage_flatten.cc b/src/tir/transforms/storage_flatten.cc index c062cf73aeef..d392866b3694 100644 --- a/src/tir/transforms/storage_flatten.cc +++ b/src/tir/transforms/storage_flatten.cc @@ -69,7 +69,7 @@ class StorageFlattener : public StmtExprMutator { op = stmt.as(); auto it = var_remap_.find(op->buffer_var.get()); if (it != var_remap_.end() && !it->second.same_as(op->buffer_var)) { - CHECK(it->second.as()); + ICHECK(it->second.as()); Var buf_var = Downcast(it->second); return Store(buf_var, op->value, op->index, op->predicate); } else { @@ -86,7 +86,7 @@ class StorageFlattener : public StmtExprMutator { auto buffer = Downcast(op->node); Stmt body = this->VisitStmt(op->body); auto it = buf_map_.find(buffer); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << buffer; + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << buffer; body = AttrStmt(it->second.buffer->data, op->attr_key, op->value, std::move(body)); return body; } else if (op->attr_key == attr::thread_extent) { @@ -101,7 +101,7 @@ class StorageFlattener : public StmtExprMutator { } else if (op->attr_key == attr::buffer_dim_align) { auto buffer = Downcast(op->node); const CallNode* tuple = op->value.as(); - CHECK(tuple && tuple->op.same_as(builtin::tvm_tuple())); + ICHECK(tuple && tuple->op.same_as(builtin::tvm_tuple())); auto& vinfo = dim_align_[buffer]; int dim = tuple->args[0].as()->value; if (static_cast(dim) >= vinfo.size()) { @@ -122,10 +122,10 @@ class StorageFlattener : public StmtExprMutator { const auto& key = op->buffer; auto it = buf_map_.find(key); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; const BufferEntry& e = it->second; - CHECK(!e.released) << "Read a buffer that is already out of scope"; + ICHECK(!e.released) << "Read a buffer that is already out of scope"; Stmt 
body = e.buffer.vstore(e.RelIndex(op->indices), op->value); if (create_bound_attributes_ && ShapeIsValid(e.buffer->shape)) { @@ -145,7 +145,7 @@ class StorageFlattener : public StmtExprMutator { const auto& key = op->buffer; if (buf_map_.count(key)) { - CHECK(buf_map_.at(key).external); + ICHECK(buf_map_.at(key).external); return this->VisitStmt(op->body); } else { // create a buffer entry @@ -157,7 +157,7 @@ class StorageFlattener : public StmtExprMutator { } // deduce current storage scope. auto it = storage_scope_.find(op->buffer.get()); - CHECK(it != storage_scope_.end()) << "Cannot find storage scope of " << op->buffer; + ICHECK(it != storage_scope_.end()) << "Cannot find storage scope of " << op->buffer; StorageScope skey; const std::string& strkey = it->second; if (strkey.length() == 0) { @@ -176,7 +176,7 @@ class StorageFlattener : public StmtExprMutator { MemoryInfo info = GetMemoryInfo(skey.to_string()); if (info.defined()) { align = (info->max_simd_bits + dtype.bits() - 1) / dtype.bits(); - CHECK_LE(const_size * dtype.bits(), info->max_num_bits) + ICHECK_LE(const_size * dtype.bits(), info->max_num_bits) << "Allocation exceed bound of memory tag " << skey.to_string(); } } @@ -243,7 +243,7 @@ class StorageFlattener : public StmtExprMutator { op = expr.as(); auto it = var_remap_.find(op->buffer_var.get()); if (it != var_remap_.end() && !it->second.same_as(op->buffer_var)) { - CHECK(it->second.as()); + ICHECK(it->second.as()); Var buf_var = Downcast(it->second); return Load(op->dtype, buf_var, op->index, op->predicate); } else { @@ -267,9 +267,9 @@ class StorageFlattener : public StmtExprMutator { const auto& key = op->buffer; auto it = buf_map_.find(key); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; const BufferEntry& e = it->second; - CHECK(!e.released) << "Read a buffer that is already out of scope"; + ICHECK(!e.released) << "Read a buffer that is already out of scope"; if (create_bound_attributes_ && ShapeIsValid(e.buffer->shape)) { shape_collector_.push_back(std::make_pair(e.buffer->data, e.buffer->shape)); @@ -280,15 +280,15 @@ class StorageFlattener : public StmtExprMutator { Stmt VisitStmt_(const PrefetchNode* op) final { Stmt stmt = StmtExprMutator::VisitStmt_(op); op = stmt.as(); - CHECK(op != nullptr); + ICHECK(op != nullptr); const auto& key = op->buffer; auto it = buf_map_.find(key); - CHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; + ICHECK(it != buf_map_.end()) << "Cannot find allocated buffer for " << key; const BufferEntry& e = it->second; - CHECK(!e.released) << "Read a buffer that is already out of scope"; - CHECK_EQ(e.buffer->shape.size(), op->bounds.size()) + ICHECK(!e.released) << "Read a buffer that is already out of scope"; + ICHECK_EQ(e.buffer->shape.size(), op->bounds.size()) << "Prefetch dim should be the same as buffer dim"; int block_size = 1, elem_cnt = cache_line_size_ / e.buffer->dtype.bytes(); @@ -385,22 +385,22 @@ class StorageFlattener : public StmtExprMutator { // region with shape [1, 1, n, m] to buffer with shape [n, m] Stmt HandleBufferBindScope(const AttrStmtNode* op) { Array arr = Downcast>(op->node); - CHECK_EQ(arr.size(), 2U); + ICHECK_EQ(arr.size(), 2U); const BufferNode* buffer = arr[0].as(); const BufferNode* target = arr[1].as(); const CallNode* tuple = op->value.as(); - CHECK(buffer && target); - CHECK(tuple && tuple->op.same_as(builtin::tvm_tuple())); + ICHECK(buffer && target); + ICHECK(tuple && 
tuple->op.same_as(builtin::tvm_tuple())); auto key = GetRef<Buffer>(target); auto it = buf_map_.find(key); - CHECK(it != buf_map_.end()) << "Cannot find buffer of " << key; + ICHECK(it != buf_map_.end()) << "Cannot find buffer of " << key; const BufferEntry& be = it->second; - CHECK(!be.released); - CHECK_EQ(tuple->args.size(), be.buffer->shape.size() * 2); + ICHECK(!be.released); + ICHECK_EQ(tuple->args.size(), be.buffer->shape.size() * 2); Array<PrimExpr> begins, extents; if (be.bounds.size() != 0) { - CHECK_EQ(tuple->args.size(), be.bounds.size() * 2); + ICHECK_EQ(tuple->args.size(), be.bounds.size() * 2); for (size_t i = 0; i < be.buffer->shape.size(); ++i) { begins.push_back(tuple->args[2 * i] - be.bounds[i]->min); extents.push_back(tuple->args[2 * i + 1]); @@ -414,7 +414,7 @@ class StorageFlattener : public StmtExprMutator { } Buffer slice = be.buffer.MakeSlice(begins, extents); if (buffer->strides.size() == 0) { - CHECK_EQ(slice->strides.size(), 0U) + ICHECK_EQ(slice->strides.size(), 0U) << "Trying to bind compact buffer to strided one strides=" << slice->strides; } else { slice = slice.MakeStrideView(); @@ -452,7 +452,7 @@ class StorageFlattener : public StmtExprMutator { inline Array<PrimExpr> RelIndex(Array<PrimExpr> args) const { if (bounds.size() != 0) { Array<PrimExpr> index; - CHECK_EQ(bounds.size(), args.size()); + ICHECK_EQ(bounds.size(), args.size()); for (size_t i = 0; i < bounds.size(); ++i) { index.push_back(args[i] - bounds[i]->min); } diff --git a/src/tir/transforms/storage_rewrite.cc b/src/tir/transforms/storage_rewrite.cc index 3abff415d1f1..2817b1334019 100644 --- a/src/tir/transforms/storage_rewrite.cc +++ b/src/tir/transforms/storage_rewrite.cc @@ -86,8 +86,8 @@ class LinearAccessPatternFinder final : public StmtExprVisitor { size_t level = scope_.size(); const VarNode* buf = op->buffer_var.get(); auto it = alloc_info_.find(buf); - CHECK(it != alloc_info_.end()); - CHECK(it->second.alloc == nullptr); + ICHECK(it != alloc_info_.end()); + ICHECK(it->second.alloc == nullptr); it->second.alloc = op; it->second.level = level; StmtExprVisitor::VisitStmt_(op); @@ -100,7 +100,7 @@ class LinearAccessPatternFinder final : public StmtExprVisitor { const VarNode* buf = op->buffer_var.get(); auto it = alloc_info_.find(buf); if (it != alloc_info_.end() && it->second.alloc) { - CHECK_LT(it->second.level, scope_.size()); + ICHECK_LT(it->second.level, scope_.size()); scope_[it->second.level].touched.push_back(buf); } StmtEntry e = scope_.back(); @@ -127,7 +127,7 @@ class LinearAccessPatternFinder final : public StmtExprVisitor { const VarNode* buf = op->buffer_var.get(); auto it = alloc_info_.find(buf); if (it != alloc_info_.end() && it->second.alloc) { - CHECK_LT(it->second.level, scope_.size()) << "Load memory in places other than store."; + ICHECK_LT(it->second.level, scope_.size()) << "Load memory in places other than store."; scope_[it->second.level].touched.push_back(buf); } }
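The RelIndex helper touched in the storage_flatten.cc hunks above shifts absolute buffer indices into the coordinate space of the realized sub-region before flattening: index[i] = args[i] - bounds[i]->min, guarded by the ICHECK_EQ on rank. A self-contained sketch of the same arithmetic with plain integers (values are illustrative, not taken from the patch):

    #include <cassert>
    #include <vector>

    // Shift absolute indices by each bound's minimum, mirroring RelIndex.
    std::vector<int> RelIndex(const std::vector<int>& args,
                              const std::vector<int>& bounds_min) {
      assert(bounds_min.size() == args.size());  // mirrors the ICHECK_EQ above
      std::vector<int> index;
      for (std::size_t i = 0; i < args.size(); ++i) {
        index.push_back(args[i] - bounds_min[i]);  // region-local coordinate
      }
      return index;
    }

    int main() {
      // A region realized over [2..10) x [3..7): absolute (4, 5) is local (2, 2).
      std::vector<int> local = RelIndex({4, 5}, {2, 3});
      return (local[0] == 2 && local[1] == 2) ? 0 : 1;
    }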
@@ -143,7 +143,7 @@ class LinearAccessPatternFinder final : public StmtExprVisitor { // Directly referencing the variable counts as a read. auto it = alloc_info_.find(buf); if (it != alloc_info_.end() && it->second.alloc) { - CHECK_LT(it->second.level, scope_.size()) << " buf=" << buf->name_hint; + ICHECK_LT(it->second.level, scope_.size()) << " buf=" << buf->name_hint; scope_[it->second.level].touched.push_back(buf); } } @@ -160,11 +160,11 @@ class LinearAccessPatternFinder final : public StmtExprVisitor { e.touched = std::move(scope_.back().touched); scope_.pop_back(); int64_t end_index = static_cast<int64_t>(linear_seq_.size()); - CHECK_GT(end_index, begin_index); + ICHECK_GT(end_index, begin_index); e.scope_pair_offset = begin_index - end_index; linear_seq_.push_back(e); // record the pointer to end index. - CHECK_NE(end_index, 0U); + ICHECK_NE(end_index, 0U); linear_seq_[begin_index].scope_pair_offset = end_index - begin_index; } void VisitStmt_(const AttrStmtNode* op) final { @@ -349,7 +349,7 @@ class StoragePlanRewriter : public StmtExprMutator { if (attach_map_.count(nullptr)) { std::vector<Stmt> nest; for (StorageEntry* e : attach_map_.at(nullptr)) { - // CHECK_EQ(e->scope.rank, 0); + // ICHECK_EQ(e->scope.rank, 0); if (e->new_alloc.defined()) { nest.emplace_back(AttrStmt(e->alloc_var, attr::storage_scope, StringImm(e->scope.to_string()), Evaluate(0))); @@ -389,7 +389,7 @@ class StoragePlanRewriter : public StmtExprMutator { } PrimExpr VisitExpr_(const CallNode* op) final { if (op->op.same_as(builtin::tvm_access_ptr())) { - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); DataType dtype = op->args[0].dtype(); const VarNode* buffer = op->args[1].as<VarNode>(); auto it = alloc_map_.find(buffer); @@ -400,7 +400,7 @@ class StoragePlanRewriter : public StmtExprMutator { PrimExpr offset = this->VisitExpr(op->args[2]); PrimExpr extent = this->VisitExpr(op->args[3]); uint64_t elem_bits = dtype.bits() * dtype.lanes(); - CHECK_EQ(se->bits_offset % elem_bits, 0U); + ICHECK_EQ(se->bits_offset % elem_bits, 0U); if (se->bits_offset != 0) { offset = make_const(offset.dtype(), se->bits_offset / elem_bits) + offset; }
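The tvm_access_ptr rewrite above turns a bit-level placement inside a merged allocation into an element offset: with elem_bits = dtype.bits() * dtype.lanes(), the ICHECK_EQ(se->bits_offset % elem_bits, 0U) guarantees the division that follows is exact. A standalone sketch of that conversion (ElemOffset is a hypothetical helper and the numbers are illustrative):

    #include <cassert>
    #include <cstdint>

    // Convert a bit offset into whole elements; the alignment assert mirrors
    // the ICHECK_EQ guarding the division in the patch.
    int64_t ElemOffset(uint64_t bits_offset, int bits, int lanes) {
      uint64_t elem_bits = static_cast<uint64_t>(bits) * lanes;
      assert(bits_offset % elem_bits == 0 && "offset must be element-aligned");
      return static_cast<int64_t>(bits_offset / elem_bits);
    }

    int main() {
      // A buffer placed 1024 bits into a merged float32x4 allocation starts
      // 1024 / (32 * 4) = 8 elements in.
      return ElemOffset(1024, 32, 4) == 8 ? 0 : 1;
    }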
@@ -435,7 +435,7 @@ class StoragePlanRewriter : public StmtExprMutator { } Stmt VisitStmt_(const ForNode* op) final { - CHECK(op->for_type != ForType::Vectorized) << "VectorizeLoop before LiftStorageAlloc"; + ICHECK(op->for_type != ForType::Vectorized) << "VectorizeLoop before LiftStorageAlloc"; // remake all the allocation at the attach scope. if (attach_map_.count(op)) { auto& svec = attach_map_[op]; @@ -508,7 +508,7 @@ class StoragePlanRewriter : public StmtExprMutator { PrimExpr RemapIndex(DataType dtype, PrimExpr index, StorageEntry* e) { if (e->bits_offset == 0) return index; uint64_t elem_bits = dtype.bits() * dtype.lanes(); - CHECK_EQ(e->bits_offset % elem_bits, 0U); + ICHECK_EQ(e->bits_offset % elem_bits, 0U); return make_const(index.dtype(), e->bits_offset / elem_bits) + index; } // Prepare the new allocations @@ -525,7 +525,7 @@ class StoragePlanRewriter : public StmtExprMutator { for (size_t i = 0; i < vec.size(); ++i) { StorageEntry* e = vec[i]; if (e->scope.tag.length() != 0) { - CHECK_NE(e->const_nbits, 0U) << "Special tagged memory must be const size"; + ICHECK_NE(e->const_nbits, 0U) << "Special tagged memory must be const size"; for (size_t j = 0; j < i; ++j) { if (e->scope == vec[j]->scope) { vec[j]->merged_children.push_back(e); @@ -562,7 +562,7 @@ class StoragePlanRewriter : public StmtExprMutator { if (e->scope.tag.length() != 0) { MemoryInfo info = GetMemoryInfo(e->scope.to_string()); uint64_t total_elem = e->const_nbits / e->elem_type.bits(); - CHECK_LE(total_elem * e->elem_type.bits(), info->max_num_bits) + ICHECK_LE(total_elem * e->elem_type.bits(), info->max_num_bits) << "Allocation exceed bound of memory tag " << e->scope.to_string(); } } else { @@ -602,7 +602,7 @@ class StoragePlanRewriter : public StmtExprMutator { if (e->scope.tag.length() != 0) { MemoryInfo info = GetMemoryInfo(e->scope.to_string()); uint64_t total_elem = e->const_nbits / e->elem_type.bits(); - CHECK_LE(total_elem * e->elem_type.bits(), info->max_num_bits) + ICHECK_LE(total_elem * e->elem_type.bits(), info->max_num_bits) << "Allocation exceed bound of memory tag " << e->scope.to_string(); } } @@ -611,9 +611,9 @@ class StoragePlanRewriter : public StmtExprMutator { } // New allocation for merged data void NewAllocTagMerged(StorageEntry* e) { - CHECK_NE(e->scope.tag.length(), 0U); + ICHECK_NE(e->scope.tag.length(), 0U); // allocate with element type. - CHECK_NE(e->const_nbits, 0U); + ICHECK_NE(e->const_nbits, 0U); MemoryInfo info = GetMemoryInfo(e->scope.to_string()); uint64_t total_bits = e->const_nbits; // By default, align to 32 bits. @@ -628,8 +628,8 @@ class StoragePlanRewriter : public StmtExprMutator { } e->alloc_var = e->allocs[0]->buffer_var; for (StorageEntry* child : e->merged_children) { - CHECK_NE(child->const_nbits, 0U); - CHECK_NE(total_bits, 0U); + ICHECK_NE(child->const_nbits, 0U); + ICHECK_NE(total_bits, 0U); child->bits_offset = total_bits; child->alloc_var = e->alloc_var; total_bits += child->const_nbits; @@ -642,7 +642,7 @@ class StoragePlanRewriter : public StmtExprMutator { make_const(e->allocs[0]->extents[0].dtype(), (total_bits + type_bits - 1) / type_bits); e->new_alloc = Allocate(e->alloc_var, e->elem_type, {alloc_size}, const_true(), Evaluate(0)); if (info.defined()) { - CHECK_LE(total_bits, info->max_num_bits) + ICHECK_LE(total_bits, info->max_num_bits) << "Allocation exceed bound of memory tag " << e->scope.to_string(); } }
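NewAllocTagMerged, updated above, lays merged children out back to back: each child is placed at the running total_bits offset, and the merged size is finally rounded up to whole elements with (total_bits + type_bits - 1) / type_bits. A sketch of that layout computation on made-up sizes:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      uint64_t total_bits = 4096;  // parent allocation, e.g. 128 x float32
      // Hypothetical child sizes; the 200-bit child forces the final round-up.
      std::vector<uint64_t> child_nbits = {512, 200};
      for (uint64_t nbits : child_nbits) {
        std::cout << "child at bit offset " << total_bits << "\n";
        total_bits += nbits;  // next child starts where this one ends
      }
      uint64_t type_bits = 32;  // element type of the merged buffer
      uint64_t alloc_elems = (total_bits + type_bits - 1) / type_bits;  // ceil-div
      std::cout << "allocate " << alloc_elems << " elements\n";  // 151 for 4808 bits
      return 0;
    }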
@@ -675,7 +675,7 @@ class StoragePlanRewriter : public StmtExprMutator { } void PlanNewScope(const Object* op) { if (thread_scope_ != nullptr) { - CHECK(thread_scope_ == op); + ICHECK(thread_scope_ == op); // erase all memory attached to this scope. for (auto it = const_free_map_.begin(); it != const_free_map_.end();) { if (it->second->attach_scope_ == op) { @@ -716,7 +716,7 @@ class StoragePlanRewriter : public StmtExprMutator { bool detect_inplace = detect_inplace_ && (it->second.gen.size() <= 2); for (const VarNode* var : it->second.gen) { - CHECK(alloc_info.count(var)); + ICHECK(alloc_info.count(var)); const AllocEntry& ae = alloc_info.at(var); StorageEntry* dst_entry = nullptr; // inplace detection @@ -758,7 +758,7 @@ class StoragePlanRewriter : public StmtExprMutator { attr::IsPragmaKey(op->attr_key)) { PlanNewScope(op); } else { - CHECK(op->attr_key == attr::extern_scope); + ICHECK(op->attr_key == attr::extern_scope); } } else if (s.stmt->IsInstance<ForNode>()) { const auto* op = static_cast<const ForNode*>(s.stmt); @@ -785,7 +785,7 @@ class StoragePlanRewriter : public StmtExprMutator { // Allocate new storage entry. StorageEntry* NewAlloc(const AllocateNode* op, const Object* attach_scope, const StorageScope& scope, size_t const_nbits) { - CHECK(op != nullptr); + ICHECK(op != nullptr); // Re-use not successful, allocate a new buffer. std::unique_ptr<StorageEntry> entry(new StorageEntry()); entry->attach_scope_ = attach_scope; @@ -799,7 +799,7 @@ class StoragePlanRewriter : public StmtExprMutator { StorageEntry* FindAlloc(const AllocateNode* op, const Object* attach_scope, const StorageScope& scope) { - CHECK(op != nullptr); + ICHECK(op != nullptr); // skip plan for local variable, // compiler can do a better job with register allocation. const uint64_t match_range = 16; @@ -858,9 +858,9 @@ class StoragePlanRewriter : public StmtExprMutator { // simulated free. void Free(const VarNode* var) { auto it = alloc_map_.find(var); - CHECK(it != alloc_map_.end()); + ICHECK(it != alloc_map_.end()); StorageEntry* e = it->second; - CHECK_NE(e->allocs.size(), 0U); + ICHECK_NE(e->allocs.size(), 0U); // disable reuse of small arrays, they will be lowered to registers in LLVM // This rule only applies if we are using non-special memory @@ -989,7 +989,7 @@ PrimFunc PointerValueTypeRewrite(PrimFunc f) { } } - CHECK_EQ(args.size(), n->params.size()); + ICHECK_EQ(args.size(), n->params.size()); n->params = args; n->body = Substitute(n->body, remap_vars); return f; diff --git a/src/tir/transforms/tensorcore_infer_fragment.cc b/src/tir/transforms/tensorcore_infer_fragment.cc index 81c8645f3dda..d0f58074ada0 100644 --- a/src/tir/transforms/tensorcore_infer_fragment.cc +++ b/src/tir/transforms/tensorcore_infer_fragment.cc @@ -56,28 +56,28 @@ class FragmentGetter : public StmtExprVisitor { if (op->op.same_as(builtin::tvm_load_matrix_sync()) || op->op.same_as(builtin::tvm_store_matrix_sync())) { // Get shape and layout information from load and store intrinsic - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); const VarNode* buffer_var = op->args[0].as<VarNode>(); - CHECK(buffer_var); + ICHECK(buffer_var); // Get shape const IntImmNode* m = op->args[1].as<IntImmNode>(); const IntImmNode* n = op->args[2].as<IntImmNode>(); const IntImmNode* k = op->args[3].as<IntImmNode>(); const StringImmNode* layout = op->args[7].as<StringImmNode>(); - CHECK(m); - CHECK(n); - CHECK(k); - CHECK(layout); + ICHECK(m); + ICHECK(n); + ICHECK(k); + ICHECK(layout); std::string scope = scopes[buffer_var]; if (fragments.count(buffer_var)) { // check if the fragment has been met before FragmentInfo info = fragments[buffer_var]; - CHECK_EQ(m->value, info.m); - CHECK_EQ(n->value, info.n); - CHECK_EQ(k->value, info.k); + ICHECK_EQ(m->value, info.m); + ICHECK_EQ(n->value, info.n); + ICHECK_EQ(k->value, info.k); if (scope == "wmma.matrix_a" || scope == "wmma.matrix_b") 
{ - CHECK_EQ(layout->value, info.layout); + ICHECK_EQ(layout->value, info.layout); } } else { // store metadata @@ -91,25 +91,25 @@ class FragmentGetter : public StmtExprVisitor { } } else if (op->op.same_as(builtin::tvm_fill_fragment())) { // Get shape information from fill intrinsic - CHECK_EQ(op->args.size(), 6U); + ICHECK_EQ(op->args.size(), 6U); const VarNode* buffer_var = op->args[0].as<VarNode>(); - CHECK(buffer_var); + ICHECK(buffer_var); // Get shape const IntImmNode* m = op->args[1].as<IntImmNode>(); const IntImmNode* n = op->args[2].as<IntImmNode>(); const IntImmNode* k = op->args[3].as<IntImmNode>(); - CHECK(m); - CHECK(n); - CHECK(k); + ICHECK(m); + ICHECK(n); + ICHECK(k); std::string scope = scopes[buffer_var]; // Only wmma.accumulator can use tvm_fill_fragment - CHECK_EQ(scope, "wmma.accumulator"); + ICHECK_EQ(scope, "wmma.accumulator"); if (fragments.count(buffer_var)) { FragmentInfo info = fragments[buffer_var]; - CHECK_EQ(m->value, info.m); - CHECK_EQ(n->value, info.n); - CHECK_EQ(k->value, info.k); + ICHECK_EQ(m->value, info.m); + ICHECK_EQ(n->value, info.n); + ICHECK_EQ(k->value, info.k); } else { FragmentInfo info(m->value, n->value, k->value, ""); fragments[buffer_var] = info; @@ -121,7 +121,7 @@ class FragmentGetter : public StmtExprVisitor { void VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::storage_scope) { const VarNode* buffer = op->node.as<VarNode>(); - CHECK(buffer); + ICHECK(buffer); scopes[buffer] = op->value.as<StringImmNode>()->value; } StmtExprVisitor::VisitStmt_(op); @@ -142,28 +142,28 @@ class FragmentChecker : public StmtExprVisitor { StmtExprVisitor::VisitExpr_(op); // Check shape when calling tvm_mma_sync if (op->op.same_as(builtin::tvm_mma_sync()) || op->op.same_as(builtin::tvm_bmma_sync())) { - CHECK_EQ(op->args.size(), 8U); + ICHECK_EQ(op->args.size(), 8U); const VarNode* buffer_var_d = op->args[0].as<VarNode>(); const VarNode* buffer_var_a = op->args[2].as<VarNode>(); const VarNode* buffer_var_b = op->args[4].as<VarNode>(); const VarNode* buffer_var_c = op->args[6].as<VarNode>(); - CHECK(buffer_var_d); - CHECK(buffer_var_a); - CHECK(buffer_var_b); - CHECK(buffer_var_c); + ICHECK(buffer_var_d); + ICHECK(buffer_var_a); + ICHECK(buffer_var_b); + ICHECK(buffer_var_c); // Check all fragments A, B, C and D have the same shape - CHECK(CheckShape(buffer_var_d, buffer_var_a)); - CHECK(CheckShape(buffer_var_d, buffer_var_b)); - CHECK(CheckShape(buffer_var_d, buffer_var_c)); + ICHECK(CheckShape(buffer_var_d, buffer_var_a)); + ICHECK(CheckShape(buffer_var_d, buffer_var_b)); + ICHECK(CheckShape(buffer_var_d, buffer_var_c)); } } private: // A tool for checking shapes of two fragments bool CheckShape(const VarNode* buffer1, const VarNode* buffer2) { - CHECK(fragment_getter.fragments.count(buffer1)); - CHECK(fragment_getter.fragments.count(buffer2)); + ICHECK(fragment_getter.fragments.count(buffer1)); + ICHECK(fragment_getter.fragments.count(buffer2)); FragmentGetter::FragmentInfo info1 = fragment_getter.fragments.at(buffer1); FragmentGetter::FragmentInfo info2 = fragment_getter.fragments.at(buffer2); return info1.m == info2.m && info1.n == info2.n && info1.k == info2.k; diff --git a/src/tir/transforms/thread_storage_sync.cc b/src/tir/transforms/thread_storage_sync.cc index 05ee8146cbd8..8f757171afbd 100644 --- a/src/tir/transforms/thread_storage_sync.cc +++ b/src/tir/transforms/thread_storage_sync.cc @@ -97,7 +97,7 @@ class ThreadSyncPlanner : public StorageAccessVisitor { } } if (sync_before_stmt) { - CHECK_EQ(condition_counter(), 0) << "Cannot insert syncs inside condition"; + ICHECK_EQ(condition_counter(), 0) << "Cannot insert syncs inside
condition"; syncs_inserted_.insert(s.stmt); } } @@ -124,7 +124,7 @@ class ThreadSyncPlanner : public StorageAccessVisitor { } } if (sync_before_stmt) { - CHECK_EQ(condition_counter(), 0) << "Cannot insert syncs inside condition"; + ICHECK_EQ(condition_counter(), 0) << "Cannot insert syncs inside condition"; syncs_inserted_.insert(s.stmt); break; } @@ -263,7 +263,7 @@ class ThreadSyncInserter : public StmtExprMutator { if (op->op.same_as(builtin::tvm_access_ptr())) { PrimExpr expr = StmtExprMutator::VisitExpr_(op); op = expr.as(); - CHECK_EQ(op->args.size(), 5U); + ICHECK_EQ(op->args.size(), 5U); const VarNode* buffer_var = op->args[1].as(); Var var(GetRef(buffer_var)); const IntImmNode* flag = op->args[4].as(); @@ -297,7 +297,7 @@ class ThreadSyncInserter : public StmtExprMutator { } // private functions. Stmt InitGlobalBarrier(const AttrStmtNode* op) { - CHECK(op != nullptr); + ICHECK(op != nullptr); Array pargs = {StringImm(runtime::symbol::tvm_prepare_global_barrier)}; Stmt prep = Evaluate(Call(DataType::Int(32), builtin::tvm_call_packed(), pargs)); Stmt body = op->body; @@ -314,9 +314,9 @@ class ThreadSyncInserter : public StmtExprMutator { return SeqStmt({prep, body}); } Stmt MakeGlobalBarrier() { - CHECK(sync_scope_.rank == StorageRank::kGlobal); + ICHECK(sync_scope_.rank == StorageRank::kGlobal); if (!num_blocks_.defined()) { - CHECK(!is_lead_.defined()); + ICHECK(!is_lead_.defined()); num_work_dim_ = thread_extents_.size(); for (const AttrStmtNode* attr : thread_extents_) { IterVar iv = Downcast(attr->node); @@ -329,7 +329,7 @@ class ThreadSyncInserter : public StmtExprMutator { } } } else { - CHECK_EQ(num_work_dim_, thread_extents_.size()); + ICHECK_EQ(num_work_dim_, thread_extents_.size()); } return Evaluate(Call(DataType::Int(32), builtin::tvm_storage_sync(), {StringImm(sync_scope_.to_string()), is_lead_, num_blocks_})); diff --git a/src/tir/transforms/unroll_loop.cc b/src/tir/transforms/unroll_loop.cc index 122654149f24..71ad899273a6 100644 --- a/src/tir/transforms/unroll_loop.cc +++ b/src/tir/transforms/unroll_loop.cc @@ -107,7 +107,7 @@ class LoopUnroller : public StmtExprMutator { auto_unroll && (value * step_count_ <= auto_max_step_ || value <= auto_max_extent_); if (op->for_type == ForType::Unrolled) { - CHECK_GE(value, 0) << "Cannot unroll non-constant loop"; + ICHECK_GE(value, 0) << "Cannot unroll non-constant loop"; auto_unroll = true; } @@ -163,7 +163,7 @@ class LoopUnroller : public StmtExprMutator { Stmt Unroll(const ForNode* op) { int value = GetExtent(op); // For loop must have a constant integer extent - CHECK_NE(value, -1) << "loop doesn't have a constant integer extent"; + ICHECK_NE(value, -1) << "loop doesn't have a constant integer extent"; if (value == 0) return Evaluate(0); Stmt body = op->body; Map vmap; diff --git a/src/tir/transforms/vectorize_loop.cc b/src/tir/transforms/vectorize_loop.cc index bf54ada6e837..239f42266b83 100644 --- a/src/tir/transforms/vectorize_loop.cc +++ b/src/tir/transforms/vectorize_loop.cc @@ -45,8 +45,8 @@ inline PrimExpr BroadcastTo(PrimExpr e, int lanes) { return Broadcast(op->value, lanes); } } - CHECK_EQ(e.dtype().lanes(), 1) << "Cannot broadcast lane=" << e.dtype().lanes() << " to " - << lanes; + ICHECK_EQ(e.dtype().lanes(), 1) << "Cannot broadcast lane=" << e.dtype().lanes() << " to " + << lanes; return Broadcast(e, lanes); } @@ -105,7 +105,7 @@ class Vectorizer : public StmtMutator, public ExprFunctorvar); if (it != let_binding_.end()) { - CHECK(deep_equal_(it->second, value)) + ICHECK(deep_equal_(it->second, value)) << 
"Let cannot bind the same var to two different values"; } if (value.dtype().lanes() != op->value.dtype().lanes()) { @@ -355,8 +355,8 @@ class Vectorizer : public StmtMutator, public ExprFunctorfor_type == ForType::Vectorized) { LOG(WARNING) << "Detect vectorize inside vectorized loop, ignoring..."; } - CHECK(is_zero(op->min)); - CHECK(!op->extent.dtype().is_vector()); + ICHECK(is_zero(op->min)); + ICHECK(!op->extent.dtype().is_vector()); PrimExpr extent = this->VisitExpr(op->extent); if (extent.dtype().is_vector()) { return Scalarize(GetRef(op)); @@ -370,7 +370,7 @@ class Vectorizer : public StmtMutator, public ExprFunctorcondition.dtype().is_vector()); + ICHECK(!op->condition.dtype().is_vector()); PrimExpr condition = this->VisitExpr(op->condition); if (condition.dtype().is_vector()) { return Scalarize(GetRef(op)); @@ -390,7 +390,7 @@ class Vectorizer : public StmtMutator, public ExprFunctorVisitExpr(op->value); - CHECK(!let_binding_.count(op->var)) << "SSA violation, a single var is binded twice"; + ICHECK(!let_binding_.count(op->var)) << "SSA violation, a single var is binded twice"; let_binding_[op->var] = value; if (value.dtype().lanes() != op->value.dtype().lanes()) { @@ -526,7 +526,7 @@ class LoopVectorizer : public StmtMutator { public: Stmt VisitStmt_(const ForNode* op) final { if (op->for_type == ForType::Vectorized) { - CHECK(is_zero(op->min)); + ICHECK(is_zero(op->min)); auto* extent_as_int = op->extent.as(); if (!extent_as_int || extent_as_int->value < 1) { LOG(FATAL) << "Failed to vectorize loop with extent " << op->extent; diff --git a/src/topi/transform.cc b/src/topi/transform.cc index 19243803cdc9..2d7657eedcdd 100644 --- a/src/topi/transform.cc +++ b/src/topi/transform.cc @@ -150,7 +150,7 @@ TVM_REGISTER_GLOBAL("topi.matmul").set_body([](TVMArgs args, TVMRetValue* rv) { *rv = matmul(args[0], args[1], args[2], args[3]); break; default: - CHECK(0) << "topi.matmul expects 2, 3 or 4 arguments"; + ICHECK(0) << "topi.matmul expects 2, 3 or 4 arguments"; } });