From 23e794422a66ccfca8d58435e341c2af58f505e2 Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Fri, 26 Aug 2022 15:59:53 -0500 Subject: [PATCH] Replace '> >' in templates with >>, NFC (#12615) The problem with greedy lexing of >> as an operator was solved in C++11, and now templates no longer require spaces between >'s. --- docs/arch/convert_layout.rst | 10 +++--- docs/arch/inferbound.rst | 4 +-- .../how_to/relay_bring_your_own_codegen.rst | 2 +- include/tvm/auto_scheduler/feature.h | 8 ++--- include/tvm/relay/attrs/image.h | 14 ++++---- include/tvm/runtime/module.h | 2 +- include/tvm/support/span.h | 2 +- include/tvm/te/operation.h | 2 +- include/tvm/topi/detail/extern.h | 2 +- include/tvm/topi/transform.h | 2 +- .../native/org_apache_tvm_native_c_api.cc | 4 +-- src/arith/analyzer.cc | 2 +- src/autotvm/touch_extractor.cc | 14 ++++---- src/contrib/ethosu/cascader/propagator.cc | 8 ++--- src/contrib/ethosu/cascader/propagator.h | 6 ++-- src/ir/span.cc | 2 +- src/node/reflection.cc | 2 +- src/printer/meta_data.h | 2 +- src/relay/analysis/dependency_graph.cc | 4 +-- src/relay/ir/transform.cc | 2 +- src/relay/transforms/convert_sparse_dense.cc | 8 ++--- src/relay/transforms/fuse_ops.cc | 2 +- src/relay/transforms/let_list.h | 2 +- src/relay/transforms/partial_eval.cc | 2 +- src/relay/transforms/type_infer.cc | 4 +-- src/runtime/contrib/ethosn/ethosn_device.cc | 6 ++-- src/runtime/graph_executor/graph_executor.cc | 4 +-- src/runtime/metal/metal_common.h | 4 +-- src/runtime/thread_pool.cc | 2 +- src/runtime/threading_backend.cc | 2 +- src/runtime/vm/pooled_allocator.h | 2 +- src/target/source/codegen_vhls.cc | 2 +- src/te/operation/compute_op.cc | 8 ++--- src/te/operation/compute_op.h | 4 +-- src/te/operation/tensor_compute_op.cc | 13 ++++--- src/te/operation/tensorize.cc | 29 ++++++++-------- src/te/schedule/graph.h | 6 ++-- src/te/schedule/schedule_dataflow_rewrite.cc | 2 +- src/tir/ir/buffer.cc | 8 ++--- src/tir/transforms/coproc_sync.cc | 34 +++++++++---------- src/tir/transforms/inject_double_buffer.cc | 4 +-- src/tir/transforms/inject_virtual_thread.cc | 2 +- src/tir/transforms/ir_utils.h | 2 +- src/tir/transforms/make_packed_api.cc | 6 ++-- src/tir/transforms/storage_access.h | 2 +- src/tir/transforms/storage_rewrite.cc | 4 +-- 46 files changed, 128 insertions(+), 130 deletions(-) diff --git a/docs/arch/convert_layout.rst b/docs/arch/convert_layout.rst index 53038e9605e8..51917fce44df 100644 --- a/docs/arch/convert_layout.rst +++ b/docs/arch/convert_layout.rst @@ -150,10 +150,10 @@ First example is for layout agnostic operators. These operators do not have any // .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout); // Take arbitrary input layouts and copy to outputs. - inline Array > ElemwiseArbitraryLayout(const Attrs& attrs, - const Array& new_in_layouts, - const Array& old_in_layouts, - const Array> &old_in_shapes) { + inline Array> ElemwiseArbitraryLayout(const Attrs& attrs, + const Array& new_in_layouts, + const Array& old_in_layouts, + const Array> &old_in_shapes) { Layout ret; if (new_in_layouts.defined()) { @@ -168,7 +168,7 @@ First example is for layout agnostic operators. 
These operators do not have any } } - return Array >{Array(old_in_layouts.size(), ret), {ret}}; + return Array>{Array(old_in_layouts.size(), ret), {ret}}; } diff --git a/docs/arch/inferbound.rst b/docs/arch/inferbound.rst index 9c78a9da7440..cc516359bdba 100644 --- a/docs/arch/inferbound.rst +++ b/docs/arch/inferbound.rst @@ -280,7 +280,7 @@ Phase 3: Propagate IntSets to consumer's input tensors /* * Input: Map dom_map: consumer root -> IntSet - * Output: Map tmap: output tensor -> vector > + * Output: Map tmap: output tensor -> vector> */ Note that the consumer's input tensors are output tensors of the stage InferBound is working on. So by establishing information about the consumer's input tensors, we actually obtain information about the stage's output tensors too: the consumers require certain regions of these tensors to be computed. This information can then be propagated through the rest of the stage, eventually obtaining Ranges for the stage's root_iter_vars by the end of Phase 4. @@ -306,7 +306,7 @@ Phase 4: Consolidate across all consumers .. code:: cpp /* - * Input: Map tmap: output tensor -> vector > + * Input: Map tmap: output tensor -> vector> * Output: Map rmap: rmap is populated for all of the stage's root_iter_vars */ diff --git a/docs/dev/how_to/relay_bring_your_own_codegen.rst b/docs/dev/how_to/relay_bring_your_own_codegen.rst index 304bd016dec2..c106bb2a6372 100644 --- a/docs/dev/how_to/relay_bring_your_own_codegen.rst +++ b/docs/dev/how_to/relay_bring_your_own_codegen.rst @@ -676,7 +676,7 @@ Again, we first define a customized runtime class as follows. The class has to b /* \brief The subgraph that being processed. */ std::string curr_subgraph_; /*! \brief A simple graph from subgraph id to node entries. */ - std::map > graph_; + std::map> graph_; /* \brief A simple pool to contain the tensor for each node in the graph. */ std::vector data_entry_; /* \brief A mapping from node id to op name. */ diff --git a/include/tvm/auto_scheduler/feature.h b/include/tvm/auto_scheduler/feature.h index 71d00f249210..a8b88b7f11f9 100644 --- a/include/tvm/auto_scheduler/feature.h +++ b/include/tvm/auto_scheduler/feature.h @@ -70,7 +70,7 @@ void GetPerStoreFeatureName(int max_n_bufs, std::vector* ret); */ void GetPerStoreFeaturesFromStates(const Array& states, const SearchTask& task, int skip_first_n_feature_extraction, int max_n_bufs, - std::vector >* features); + std::vector>* features); /*! * \brief Get per-store feature from states of different tasks @@ -83,7 +83,7 @@ void GetPerStoreFeaturesFromStates(const Array& states, const SearchTask& */ void GetPerStoreFeaturesFromStates(const Array& states, const std::vector& tasks, int skip_first_n_feature_extraction, int max_n_bufs, - std::vector >* features); + std::vector>* features); /*! 
* \brief Get per-store features from a log file @@ -96,7 +96,7 @@ void GetPerStoreFeaturesFromStates(const Array& states, const std::vector * \param task_ids The task ids for all states */ void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int max_n_bufs, - std::vector >* features, + std::vector>* features, std::vector* normalized_throughputs, std::vector* task_ids); @@ -114,7 +114,7 @@ void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int void GetPerStoreFeaturesFromMeasurePairs(const Array& inputs, const Array& results, int skip_first_n_feature_extraction, int max_n_bufs, - std::vector >* features, + std::vector>* features, std::vector* normalized_throughputs, std::vector* task_ids); diff --git a/include/tvm/relay/attrs/image.h b/include/tvm/relay/attrs/image.h index e0ee6dc748c2..43510ea68501 100644 --- a/include/tvm/relay/attrs/image.h +++ b/include/tvm/relay/attrs/image.h @@ -46,9 +46,9 @@ struct Resize1DAttrs : public tvm::AttrsNode { DataType out_dtype; TVM_DECLARE_ATTRS(Resize1DAttrs, "relay.attrs.Resize1DAttrs") { - TVM_ATTR_FIELD(size).set_default(NullValue >()).describe("Output Size."); + TVM_ATTR_FIELD(size).set_default(NullValue>()).describe("Output Size."); TVM_ATTR_FIELD(roi) - .set_default(NullValue >()) + .set_default(NullValue>()) .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'"); TVM_ATTR_FIELD(layout).set_default("NCW").describe( "Dimension ordering of input data. Can be 'NCW', 'NWC', etc." @@ -99,9 +99,9 @@ struct Resize2DAttrs : public tvm::AttrsNode { DataType out_dtype; TVM_DECLARE_ATTRS(Resize2DAttrs, "relay.attrs.Resize2DAttrs") { - TVM_ATTR_FIELD(size).set_default(NullValue >()).describe("Output Size."); + TVM_ATTR_FIELD(size).set_default(NullValue>()).describe("Output Size."); TVM_ATTR_FIELD(roi) - .set_default(NullValue >()) + .set_default(NullValue>()) .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'"); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." @@ -152,9 +152,9 @@ struct Resize3DAttrs : public tvm::AttrsNode { DataType out_dtype; TVM_DECLARE_ATTRS(Resize3DAttrs, "relay.attrs.Resize3DAttrs") { - TVM_ATTR_FIELD(size).set_default(NullValue >()).describe("Output Size."); + TVM_ATTR_FIELD(size).set_default(NullValue>()).describe("Output Size."); TVM_ATTR_FIELD(roi) - .set_default(NullValue >()) + .set_default(NullValue>()) .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'"); TVM_ATTR_FIELD(layout).set_default("NCDHW").describe( "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc." @@ -200,7 +200,7 @@ struct CropAndResizeAttrs : public tvm::AttrsNode { DataType out_dtype; TVM_DECLARE_ATTRS(CropAndResizeAttrs, "relay.attrs.CropAndResizeAttrs") { - TVM_ATTR_FIELD(crop_size).set_default(NullValue >()).describe("Target Size."); + TVM_ATTR_FIELD(crop_size).set_default(NullValue>()).describe("Target Size."); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" diff --git a/include/tvm/runtime/module.h b/include/tvm/runtime/module.h index 9d139c9feff3..a54f98a558f3 100644 --- a/include/tvm/runtime/module.h +++ b/include/tvm/runtime/module.h @@ -234,7 +234,7 @@ class TVM_DLL ModuleNode : public Object { private: /*! 
\brief Cache used by GetImport */ - std::unordered_map > import_cache_; + std::unordered_map> import_cache_; std::mutex mutex_; }; diff --git a/include/tvm/support/span.h b/include/tvm/support/span.h index 603fb531f43a..689a48dee788 100644 --- a/include/tvm/support/span.h +++ b/include/tvm/support/span.h @@ -68,7 +68,7 @@ class Span { inline bool operator!=(iterator_base other) { return !(*this == other); } - template ::value> > + template ::value>> inline operator iterator_base() const { return iterator_base(ptr_, end_); } diff --git a/include/tvm/te/operation.h b/include/tvm/te/operation.h index e91a0930f37b..2c50f3c3157b 100644 --- a/include/tvm/te/operation.h +++ b/include/tvm/te/operation.h @@ -47,7 +47,7 @@ struct TensorDom { // constructor explicit TensorDom(int ndim) : data(ndim) {} /*! \brief The domain data */ - std::vector > data; + std::vector> data; }; /*! diff --git a/include/tvm/topi/detail/extern.h b/include/tvm/topi/detail/extern.h index 2561f8d1ca27..dee4bf70a729 100644 --- a/include/tvm/topi/detail/extern.h +++ b/include/tvm/topi/detail/extern.h @@ -75,7 +75,7 @@ using FExtern = std::function, Array)>; * be one output Tensor for each element of out_shapes, with dtype equal to the corresponding * element of out_types. */ -inline Array make_extern(const Array >& out_shapes, +inline Array make_extern(const Array>& out_shapes, const std::vector& out_types, const Array& inputs, FExtern fextern, std::string name, std::string tag, ::tvm::Map attrs) { diff --git a/include/tvm/topi/transform.h b/include/tvm/topi/transform.h index 7accbf86912d..4c96ed42f6e9 100644 --- a/include/tvm/topi/transform.h +++ b/include/tvm/topi/transform.h @@ -592,7 +592,7 @@ inline Array split(const Tensor& x, Array split_indices, int a begin_ids.push_back(idx); } - Array > out_shapes; + Array> out_shapes; for (size_t i = 0; i < begin_ids.size(); ++i) { PrimExpr out_axis_size; if (i == begin_ids.size() - 1) { diff --git a/jvm/native/src/main/native/org_apache_tvm_native_c_api.cc b/jvm/native/src/main/native/org_apache_tvm_native_c_api.cc index f7be0cf80eb0..f86191d45bbc 100644 --- a/jvm/native/src/main/native/org_apache_tvm_native_c_api.cc +++ b/jvm/native/src/main/native/org_apache_tvm_native_c_api.cc @@ -42,8 +42,8 @@ struct TVMFuncArgsThreadLocalEntry { std::vector tvmFuncArgValues; std::vector tvmFuncArgTypes; // for later release - std::vector > tvmFuncArgPushedStrs; - std::vector > tvmFuncArgPushedBytes; + std::vector> tvmFuncArgPushedStrs; + std::vector> tvmFuncArgPushedBytes; }; typedef dmlc::ThreadLocalStore TVMFuncArgsThreadLocalStore; diff --git a/src/arith/analyzer.cc b/src/arith/analyzer.cc index f32c9b2ff4cf..ad52a6578b24 100644 --- a/src/arith/analyzer.cc +++ b/src/arith/analyzer.cc @@ -186,7 +186,7 @@ TVM_REGISTER_GLOBAL("arith.CreateAnalyzer").set_body([](TVMArgs args, TVMRetValu return PackedFunc([self](TVMArgs args, TVMRetValue* ret) { // can't use make_shared due to noexcept(false) decl in destructor, // see https://stackoverflow.com/a/43907314 - auto ctx = std::shared_ptr >( + auto ctx = std::shared_ptr>( new With(self.get(), args[0])); auto fexit = [ctx](TVMArgs, TVMRetValue*) mutable { ctx.reset(); }; *ret = PackedFunc(fexit); diff --git a/src/autotvm/touch_extractor.cc b/src/autotvm/touch_extractor.cc index 10ead718bae2..dd3cf88f7bf6 100644 --- a/src/autotvm/touch_extractor.cc +++ b/src/autotvm/touch_extractor.cc @@ -220,7 +220,7 @@ void TouchExtractor::ExitMem_() {} * \note If you want to flatten these features as the input of your model, * You can use the faster one 
GetItervarFeatureFlatten below. */ -void GetItervarFeature(Stmt stmt, bool take_log, Array > >* ret_feature) { +void GetItervarFeature(Stmt stmt, bool take_log, Array>>* ret_feature) { // extract TouchExtractor touch_analyzer; touch_analyzer.Analyze(stmt); @@ -248,7 +248,7 @@ void GetItervarFeature(Stmt stmt, bool take_log, Array > > // serialize for front end for (auto var : vars) { - Array > feature_row; + Array> feature_row; ItervarFeature& fea = touch_analyzer.itervar_map[var]; feature_row.push_back(Array{tvm::tir::StringImm("_itervar_"), var}); @@ -389,10 +389,10 @@ void GetCurveSampleFeatureFlatten(Stmt stmt, int sample_n, std::vector* r }); int max_depth = 0; - std::map > reuse_curve; - std::map > count_curve; - std::map > topdown_curve; - std::map > bottomup_curve; + std::map> reuse_curve; + std::map> count_curve; + std::map> topdown_curve; + std::map> bottomup_curve; std::set innermost_buffers; std::set added; @@ -485,7 +485,7 @@ TVM_REGISTER_GLOBAL("autotvm.feature.GetItervarFeature") .set_body([](TVMArgs args, TVMRetValue* ret) { Stmt stmt = args[0]; bool take_log = args[1]; - Array > > ret_feature; + Array>> ret_feature; GetItervarFeature(stmt, take_log, &ret_feature); diff --git a/src/contrib/ethosu/cascader/propagator.cc b/src/contrib/ethosu/cascader/propagator.cc index 25b711a53d05..ca8aaf6e27d5 100644 --- a/src/contrib/ethosu/cascader/propagator.cc +++ b/src/contrib/ethosu/cascader/propagator.cc @@ -34,7 +34,7 @@ namespace ethosu { namespace cascader { void PropagatorNode::VisitAttrs(AttrVisitor* v) { - Array > tmp_transform; + Array> tmp_transform; for (const auto& vec : transform_) { tmp_transform.push_back(make_array(vec)); } @@ -43,7 +43,7 @@ void PropagatorNode::VisitAttrs(AttrVisitor* v) { v->Visit("_offset", &tmp_arr); } -Propagator::Propagator(const std::vector >& transform, +Propagator::Propagator(const std::vector>& transform, const std::vector& offset) { auto n = make_object(); size_t rows = transform.size(); @@ -102,8 +102,8 @@ StripeConfig PropagatorNode::propagate(const StripeConfig& stripe_config) const } TVM_REGISTER_GLOBAL("contrib.ethosu.cascader.Propagator") - .set_body_typed([](Array > transform, Array offset) { - std::vector > vtransform; + .set_body_typed([](Array> transform, Array offset) { + std::vector> vtransform; for (const auto& vec : transform) { vtransform.push_back(make_vector(vec)); } diff --git a/src/contrib/ethosu/cascader/propagator.h b/src/contrib/ethosu/cascader/propagator.h index 2d4bd0d0154a..3946d0806a0c 100644 --- a/src/contrib/ethosu/cascader/propagator.h +++ b/src/contrib/ethosu/cascader/propagator.h @@ -43,7 +43,7 @@ class PropagatorNode : public Object { void VisitAttrs(AttrVisitor* v); /*! \return The transform matrix to apply to the StripeConfigs */ - const std::vector > GetTransform() const { return transform_; } + const std::vector> GetTransform() const { return transform_; } /*! \return The offset vector to apply to the StripeConfigs */ const std::vector GetOffset() const { return offset_; } /*! \return The number of input dimensions */ @@ -92,7 +92,7 @@ class PropagatorNode : public Object { friend class Propagator; /*! \brief The transform matrix to apply to the StripeConfigs */ - std::vector > transform_; + std::vector> transform_; /*! 
\brief The offset vector to apply to the StripeConfigs */ std::vector offset_; }; @@ -124,7 +124,7 @@ class PropagatorNode : public Object { */ class Propagator : public ObjectRef { public: - Propagator(const std::vector >& transform, const std::vector& offset); + Propagator(const std::vector>& transform, const std::vector& offset); TVM_DEFINE_OBJECT_REF_METHODS(Propagator, ObjectRef, PropagatorNode); }; diff --git a/src/ir/span.cc b/src/ir/span.cc index 4a26f3a6eb11..e19bef4cb864 100644 --- a/src/ir/span.cc +++ b/src/ir/span.cc @@ -30,7 +30,7 @@ namespace tvm { ObjectPtr GetSourceNameNode(const String& name) { // always return pointer as the reference can change as map re-allocate. // or use another level of indirection by creating a unique_ptr - static std::unordered_map > source_map; + static std::unordered_map> source_map; auto sn = source_map.find(name); if (sn == source_map.end()) { diff --git a/src/node/reflection.cc b/src/node/reflection.cc index a0f83f6cf5ad..aa572e99658c 100644 --- a/src/node/reflection.cc +++ b/src/node/reflection.cc @@ -254,7 +254,7 @@ void NodeListAttrNames(TVMArgs args, TVMRetValue* ret) { Object* self = static_cast(args[0].value().v_handle); auto names = - std::make_shared >(ReflectionVTable::Global()->ListAttrNames(self)); + std::make_shared>(ReflectionVTable::Global()->ListAttrNames(self)); *ret = PackedFunc([names](TVMArgs args, TVMRetValue* rv) { int64_t i = args[0]; diff --git a/src/printer/meta_data.h b/src/printer/meta_data.h index b076ad07caaf..ddf0d78087ee 100644 --- a/src/printer/meta_data.h +++ b/src/printer/meta_data.h @@ -136,7 +136,7 @@ class TextMetaDataContext { private: /*! \brief additional metadata stored in TVM json format */ - std::unordered_map > meta_data_; + std::unordered_map> meta_data_; /*! 
\brief map from meta data into its string representation */ std::unordered_map meta_repr_; }; diff --git a/src/relay/analysis/dependency_graph.cc b/src/relay/analysis/dependency_graph.cc index 18913ca37562..91711fa4baa8 100644 --- a/src/relay/analysis/dependency_graph.cc +++ b/src/relay/analysis/dependency_graph.cc @@ -56,11 +56,11 @@ class DependencyGraph::Creator : private MixedModeVisitor { } void Depend(DependencyGraph::Node* parent, DependencyGraph::Node* child) { - auto* parent_link = arena_->make >(); + auto* parent_link = arena_->make>(); parent_link->value = parent; child->parents.Push(parent_link); - auto* child_link = arena_->make >(); + auto* child_link = arena_->make>(); child_link->value = child; parent->children.Push(child_link); } diff --git a/src/relay/ir/transform.cc b/src/relay/ir/transform.cc index 1a16cc9becf1..fc1f3a15077e 100644 --- a/src/relay/ir/transform.cc +++ b/src/relay/ir/transform.cc @@ -126,7 +126,7 @@ IRModule FunctionPassNode::operator()(IRModule mod, const PassContext& pass_ctx) IRModule updated_mod = mod->ShallowCopy(); - std::vector > updates; + std::vector> updates; for (const auto& kv : mod->functions) { // only process optimizable Relay Functions if (const auto* function_node = AsOptimizableFunctionNode(kv.second)) { diff --git a/src/relay/transforms/convert_sparse_dense.cc b/src/relay/transforms/convert_sparse_dense.cc index faba366eca49..7053f1301cca 100644 --- a/src/relay/transforms/convert_sparse_dense.cc +++ b/src/relay/transforms/convert_sparse_dense.cc @@ -73,7 +73,7 @@ TVM_REGISTER_GLOBAL("relay.analysis.search_dense_op_weight").set_body_typed(Sear class DenseToSparseDenseMutator : public ExprRewriter { public: DenseToSparseDenseMutator(const Array& weight_name, - const Array >& weight_shape) + const Array>& weight_shape) : dense_op_(Op::Get("nn.dense")), sparse_dense_op_(Op::Get("nn.sparse_dense")) { ICHECK_EQ(weight_name.size(), weight_shape.size()); for (size_t i = 0; i < weight_name.size(); ++i) { @@ -117,11 +117,11 @@ class DenseToSparseDenseMutator : public ExprRewriter { // Cached op const Op& dense_op_; const Op& sparse_dense_op_; - std::unordered_map > target_weights_; + std::unordered_map> target_weights_; }; // class DenseToSparseDenseAlter Expr DenseToSparse(const Expr& e, const Array& weight_name, - const Array >& weight_shape) { + const Array>& weight_shape) { auto rewriter = DenseToSparseDenseMutator(weight_name, weight_shape); return PostOrderRewrite(e, &rewriter); } @@ -129,7 +129,7 @@ Expr DenseToSparse(const Expr& e, const Array& weight_name, namespace transform { Pass DenseToSparse(const Array& weight_name, - const Array >& weight_shape) { + const Array>& weight_shape) { runtime::TypedPackedFunc pass_func = [=](Function f, IRModule m, PassContext pc) { // Remove FreeVar warnings diff --git a/src/relay/transforms/fuse_ops.cc b/src/relay/transforms/fuse_ops.cc index 1ced0883a14c..dac5dc69ead5 100644 --- a/src/relay/transforms/fuse_ops.cc +++ b/src/relay/transforms/fuse_ops.cc @@ -180,7 +180,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor { graph_.node_map[key] = current; } if (parent != nullptr) { - auto* link = arena_->make >(); + auto* link = arena_->make>(); link->value.node = parent; link->value.pattern = pattern; current->outputs.Push(link); diff --git a/src/relay/transforms/let_list.h b/src/relay/transforms/let_list.h index f449d6c3b011..f908fbcee514 100644 --- a/src/relay/transforms/let_list.h +++ b/src/relay/transforms/let_list.h @@ -145,7 +145,7 @@ class LetList { } private: - std::vector > lets_; + 
std::vector> lets_; bool used_ = false; }; diff --git a/src/relay/transforms/partial_eval.cc b/src/relay/transforms/partial_eval.cc index fc9922ca03ef..f791192e25c1 100644 --- a/src/relay/transforms/partial_eval.cc +++ b/src/relay/transforms/partial_eval.cc @@ -772,7 +772,7 @@ class PartialEvaluator : public ExprFunctor if (func->HasNonzeroAttr(attr::kPrimitive)) { return ConstEvaluateFunc(func); } - std::vector > free_vars; + std::vector> free_vars; for (const auto& v : FreeVars(func)) { if (v != var) { free_vars.push_back(std::pair(v, env_.Lookup(v))); diff --git a/src/relay/transforms/type_infer.cc b/src/relay/transforms/type_infer.cc index 9c01c40517f4..d2eb48073f7d 100644 --- a/src/relay/transforms/type_infer.cc +++ b/src/relay/transforms/type_infer.cc @@ -829,7 +829,7 @@ void EnsureCheckedType(const Expr& e) { AllCheckTypePopulated().VisitExpr(e); } // TODO(@jroesch): Can we optimize this? void AddGlobalTypes(IRModule mod) { - std::vector > updates; + std::vector> updates; for (const auto& it : mod->functions) { // Currently we don't type check TIR. // The inferencer will only check Relay functions @@ -961,7 +961,7 @@ Pass InferType() { // Add all the type annotations to the functions in the model. AddGlobalTypes(mod); - std::vector > updates; + std::vector> updates; for (const auto& it : updated_mod->functions) { // Currently we don't type check TIR. // diff --git a/src/runtime/contrib/ethosn/ethosn_device.cc b/src/runtime/contrib/ethosn/ethosn_device.cc index 628f99788d16..900ae65afcc3 100644 --- a/src/runtime/contrib/ethosn/ethosn_device.cc +++ b/src/runtime/contrib/ethosn/ethosn_device.cc @@ -87,7 +87,7 @@ void CopyOutput(dl::Buffer* source_buffers[], std::vector* outputs) { } } -void CreateBuffers(std::vector >* fm, +void CreateBuffers(std::vector>* fm, const std::vector& tensors, const std::vector& tensor_sizes, bool input) { for (size_t i = 0; i < tensors.size(); i++) { @@ -118,11 +118,11 @@ bool Inference(tvm::runtime::TVMArgs args, dl::Network* npu, } // Set up input buffers - std::vector > ifm(inputs.size()); + std::vector> ifm(inputs.size()); CreateBuffers(&ifm, inputs, input_sizes, true); // Set up output buffers - std::vector > ofm(outputs.size()); + std::vector> ofm(outputs.size()); CreateBuffers(&ofm, outputs, output_sizes, false); // Raw pointers for the inference diff --git a/src/runtime/graph_executor/graph_executor.cc b/src/runtime/graph_executor/graph_executor.cc index 78e65f6f2319..e3113dbfe54c 100644 --- a/src/runtime/graph_executor/graph_executor.cc +++ b/src/runtime/graph_executor/graph_executor.cc @@ -519,8 +519,8 @@ void GraphExecutor::SetupOpExecs() { } } -std::pair, std::shared_ptr > -GraphExecutor::CreateTVMOp(const TVMOpParam& param, const std::vector& args) { +std::pair, std::shared_ptr> GraphExecutor::CreateTVMOp( + const TVMOpParam& param, const std::vector& args) { std::shared_ptr arg_ptr = std::make_shared(); // setup address. arg_ptr->args = args; diff --git a/src/runtime/metal/metal_common.h b/src/runtime/metal/metal_common.h index 47a5999fdce9..dad156bcdddc 100644 --- a/src/runtime/metal/metal_common.h +++ b/src/runtime/metal/metal_common.h @@ -133,7 +133,7 @@ class Stream { class MetalWorkspace final : public DeviceAPI { public: // the devices - std::vector > devices; + std::vector> devices; // Warp size constant std::vector warp_size; // Whether it is initialized. @@ -186,7 +186,7 @@ class MetalThreadEntry { /*! \brief The current stream */ std::vector stream; /*! \brief The shared buffer used for copy. 
*/ - std::vector > temp_buffer_; + std::vector> temp_buffer_; /*! \brief workspace pool */ WorkspacePool pool; // constructor diff --git a/src/runtime/thread_pool.cc b/src/runtime/thread_pool.cc index 7744174ec866..665244d3d1bd 100644 --- a/src/runtime/thread_pool.cc +++ b/src/runtime/thread_pool.cc @@ -369,7 +369,7 @@ class ThreadPool { int num_workers_used_; // if or not to exclude worker 0 and use main to run task 0 bool exclude_worker0_{true}; - std::vector > queues_; + std::vector> queues_; std::unique_ptr threads_; }; diff --git a/src/runtime/threading_backend.cc b/src/runtime/threading_backend.cc index 14b5f27dd495..ef1aa69f6455 100644 --- a/src/runtime/threading_backend.cc +++ b/src/runtime/threading_backend.cc @@ -285,7 +285,7 @@ class ThreadGroup::Impl { // is not supported in earlier versions of QuRT. In such cases assume 4. if (threads == 0) threads = 4; #endif - std::vector > max_freqs; + std::vector> max_freqs; for (unsigned int i = 0; i < threads; ++i) { int64_t cur_freq = 0; diff --git a/src/runtime/vm/pooled_allocator.h b/src/runtime/vm/pooled_allocator.h index e5f236983a73..9c11c783011e 100644 --- a/src/runtime/vm/pooled_allocator.h +++ b/src/runtime/vm/pooled_allocator.h @@ -99,7 +99,7 @@ class PooledAllocator final : public Allocator { private: size_t page_size_; std::atomic used_memory_; - std::unordered_map > memory_pool_; + std::unordered_map> memory_pool_; std::recursive_mutex mu_; Device device_; }; diff --git a/src/target/source/codegen_vhls.cc b/src/target/source/codegen_vhls.cc index 9896d8b833f9..4091b64f4524 100644 --- a/src/target/source/codegen_vhls.cc +++ b/src/target/source/codegen_vhls.cc @@ -157,7 +157,7 @@ runtime::Module BuildSDAccel(IRModule mod, Target target) { std::string whole_code = cg.Finish(); // Generate source code for compilation. - Array > kernel_info; + Array> kernel_info; for (auto kv : mod->functions) { ICHECK(kv.second->IsInstance()) << "CodeGenOpenCL: Can only take PrimFunc"; diff --git a/src/te/operation/compute_op.cc b/src/te/operation/compute_op.cc index c3062045939a..7f8facad5568 100644 --- a/src/te/operation/compute_op.cc +++ b/src/te/operation/compute_op.cc @@ -357,10 +357,10 @@ Stmt MakeComputeStmt(const ComputeOpNode* self, const Stage& stage, init = MergeNest(n.init_nest, init); init = Substitute(init, n.init_vmap); // common nest - std::vector > common(n.main_nest.begin(), - n.main_nest.begin() + n.num_common_loop + 1); - std::vector > reduce(n.main_nest.begin() + n.num_common_loop + 1, - n.main_nest.end()); + std::vector> common(n.main_nest.begin(), + n.main_nest.begin() + n.num_common_loop + 1); + std::vector> reduce(n.main_nest.begin() + n.num_common_loop + 1, + n.main_nest.end()); provide = MergeNest(reduce, provide); if (debug_keep_trivial_loop) { provide = MergeNest(common, provide); diff --git a/src/te/operation/compute_op.h b/src/te/operation/compute_op.h index 2661eb976f2e..944334a41fdb 100644 --- a/src/te/operation/compute_op.h +++ b/src/te/operation/compute_op.h @@ -41,13 +41,13 @@ struct ComputeLoopNest { // predicates for the initialize loop std::vector init_predicates; // Initialization nest involved. - std::vector > init_nest; + std::vector> init_nest; // Value map for the init code std::unordered_map init_vmap; // Predicates for the main update loop std::vector main_predicates; // The general loop nest - std::vector > main_nest; + std::vector> main_nest; // Value map for the IterVar. 
std::unordered_map main_vmap; diff --git a/src/te/operation/tensor_compute_op.cc b/src/te/operation/tensor_compute_op.cc index 262e5a2b97f4..00f751c58a09 100644 --- a/src/te/operation/tensor_compute_op.cc +++ b/src/te/operation/tensor_compute_op.cc @@ -202,7 +202,7 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, ComputeLoopNest n = ComputeLoopNest::Create(this, stage, dom_map, debug_keep_trivial_loop); if (this->reduce_axis.size() == 0) { - std::vector > nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); + std::vector> nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); nest.emplace_back(MakeIfNest(n.main_predicates)); ICHECK_EQ(n.init_predicates.size(), 0U); ICHECK(this->intrin->body.defined()) @@ -219,16 +219,15 @@ Stmt TensorComputeOpNode::BuildProvide(const Stage& stage, ICHECK(this->intrin->reduce_update.defined()) << "Reduction update op is not defined"; // Need init and update steps ICHECK_NE(this->reduce_axis.size(), 0U); - std::vector > common(n.main_nest.begin(), - n.main_nest.begin() + n.num_common_loop + 1); - std::vector > update_nest(n.main_nest.begin() + n.num_common_loop + 1, - n.main_nest.begin() + tloc + 1); + std::vector> common(n.main_nest.begin(), + n.main_nest.begin() + n.num_common_loop + 1); + std::vector> update_nest(n.main_nest.begin() + n.num_common_loop + 1, + n.main_nest.begin() + tloc + 1); update_nest.emplace_back(MakeIfNest(n.main_predicates)); if (this->intrin->reduce_init.defined()) { // init nest - std::vector > init_nest(n.init_nest.begin(), - n.init_nest.begin() + tloc + 1); + std::vector> init_nest(n.init_nest.begin(), n.init_nest.begin() + tloc + 1); init_nest.emplace_back(MakeIfNest(n.init_predicates)); Stmt init = MergeNest(output_bind_nest, this->intrin->reduce_init); init = te::Substitute(init, n.init_vmap); diff --git a/src/te/operation/tensorize.cc b/src/te/operation/tensorize.cc index b31b61b739c1..138aeeb37f19 100644 --- a/src/te/operation/tensorize.cc +++ b/src/te/operation/tensorize.cc @@ -42,7 +42,7 @@ using namespace tir; size_t InferTensorizeRegion(const ComputeOpNode* self, const Stage& stage, const std::unordered_map& dom_map, std::unordered_map* out_dom, - std::unordered_map >* in_region) { + std::unordered_map>* in_region) { // Get the bound of the tensorized scope. 
bool found_point = false; size_t loc_scope = 0; @@ -198,7 +198,7 @@ class TensorIntrinMatcher final : public StmtExprMutator { void Init(const ComputeOpNode* self, const Stage& stage, const std::unordered_map& dom_map, const std::unordered_map& out_dom, - const std::unordered_map >& in_region, const TensorIntrin& intrin, + const std::unordered_map>& in_region, const TensorIntrin& intrin, Map* compute_intrin_iter_space) { ICHECK(self == stage->op.get()); @@ -298,7 +298,7 @@ class TensorIntrinMatcher final : public StmtExprMutator { Array MatchTensorizeBody(const ComputeOpNode* self, const Stage& stage, const std::unordered_map& dom_map, const std::unordered_map& out_dom, - const std::unordered_map >& in_region, + const std::unordered_map>& in_region, const TensorIntrin& intrin, Map* compute_intrin_iter_space) { TensorIntrinMatcher matcher; @@ -314,7 +314,7 @@ void VerifyTensorizeBody(const ComputeOpNode* self, const Stage& stage, const std::unordered_map& value_map, const std::unordered_map& dom_map, const std::unordered_map& out_dom, - const std::unordered_map >& in_region, + const std::unordered_map>& in_region, const TensorIntrin& intrin) { StructuralEqual expr_equal; Map compute_intrin_iter_space; @@ -346,7 +346,7 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, const std::unordered_map& dom_map, bool debug_keep_trivial_loop) { std::unordered_map out_dom; - std::unordered_map > in_region; + std::unordered_map> in_region; size_t tloc = InferTensorizeRegion(self, stage, dom_map, &out_dom, &in_region); TensorIntrin intrin = stage->iter_var_attrs.at(stage->leaf_iter_vars[tloc])->tensor_intrin; ICHECK(intrin.defined()); @@ -418,7 +418,7 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, } if (tloc <= n.num_common_loop) { // Do no need to split reduction - std::vector > nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); + std::vector> nest(n.main_nest.begin(), n.main_nest.begin() + tloc + 1); nest.emplace_back(MakeIfNest(n.main_predicates)); ICHECK_EQ(n.init_predicates.size(), 0U); ICHECK(intrin->body.defined()) << "Normal store op for intrin " << intrin << " is not defined"; @@ -434,16 +434,15 @@ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, << "Reduction update op for intrin " << intrin << " is not defined"; // Need init and update steps ICHECK_NE(self->reduce_axis.size(), 0U); - std::vector > common(n.main_nest.begin(), - n.main_nest.begin() + n.num_common_loop + 1); - std::vector > update_nest(n.main_nest.begin() + n.num_common_loop + 1, - n.main_nest.begin() + tloc + 1); + std::vector> common(n.main_nest.begin(), + n.main_nest.begin() + n.num_common_loop + 1); + std::vector> update_nest(n.main_nest.begin() + n.num_common_loop + 1, + n.main_nest.begin() + tloc + 1); update_nest.emplace_back(MakeIfNest(n.main_predicates)); if (intrin->reduce_init.defined()) { // init nest - std::vector > init_nest(n.init_nest.begin(), - n.init_nest.begin() + tloc + 1); + std::vector> init_nest(n.init_nest.begin(), n.init_nest.begin() + tloc + 1); init_nest.emplace_back(MakeIfNest(n.init_predicates)); Stmt init = MergeNest(output_bind_nest, intrin->reduce_init); init = te::Substitute(init, n.init_vmap); @@ -476,17 +475,17 @@ TVM_REGISTER_GLOBAL("test.op.InferTensorizeRegion").set_body([](TVMArgs args, TV Stage stage = args[0]; Map dmap = args[1]; std::unordered_map out_dom; - std::unordered_map > in_region; + std::unordered_map> in_region; ICHECK(stage->op.as()); InferTensorizeRegion(stage->op.as(), stage, as_unordered_map(dmap), &out_dom, 
&in_region); - *ret = Array{Map(out_dom), Map >(in_region)}; + *ret = Array{Map(out_dom), Map>(in_region)}; }); TVM_REGISTER_GLOBAL("test.op.MatchTensorizeBody").set_body([](TVMArgs args, TVMRetValue* ret) { Stage stage = args[0]; Map out_dom = args[1]; - Map > in_region = args[2]; + Map> in_region = args[2]; TensorIntrin intrin = args[3]; Map vrange; ICHECK(stage->op.as()); diff --git a/src/te/schedule/graph.h b/src/te/schedule/graph.h index bb98ff4b706d..d31473d1b5a0 100644 --- a/src/te/schedule/graph.h +++ b/src/te/schedule/graph.h @@ -38,17 +38,17 @@ namespace te { /*! * \brief data structure of Operation->Tensors it reads */ -using ReadGraph = Map >; +using ReadGraph = Map>; /*! * \brief AttachPath maps op-> a list of IterVar */ -using AttachPath = Map >; +using AttachPath = Map>; /*! * \brief The map between tensor and operation it feeds to. */ -using FeedGraph = std::unordered_map >; +using FeedGraph = std::unordered_map>; /*! * \brief Get read graph of each operation to all the diff --git a/src/te/schedule/schedule_dataflow_rewrite.cc b/src/te/schedule/schedule_dataflow_rewrite.cc index a8363fd084cd..39243bf2216f 100644 --- a/src/te/schedule/schedule_dataflow_rewrite.cc +++ b/src/te/schedule/schedule_dataflow_rewrite.cc @@ -507,7 +507,7 @@ void RebaseNonZeroMinLoop(ScheduleNode* sch) { void InjectInline(ScheduleNode* sch, bool feature_extraction_mode) { sch->InvalidateCache(); - std::vector > new_body(sch->stages.size()); + std::vector> new_body(sch->stages.size()); std::vector changed(sch->stages.size(), false); std::vector new_hybrid_body(sch->stages.size()); std::vector hybrid_changed(sch->stages.size(), false); diff --git a/src/tir/ir/buffer.cc b/src/tir/ir/buffer.cc index 1ac0f1f1705e..cae4109a6026 100644 --- a/src/tir/ir/buffer.cc +++ b/src/tir/ir/buffer.cc @@ -152,7 +152,7 @@ inline std::pair MergeMulModInner(arith::Analyzer* analyzer, // Otherwise, the elements will be added to the no_opt_sum variable inline void MergeMulModInsertElements(const std::vector& eles, std::list* mult_exprs, - std::list >* mod_exprs, + std::list>* mod_exprs, PrimExpr* no_opt_sum, bool* has_mult, bool* has_mod) { using namespace tir; *has_mult = false; @@ -194,13 +194,13 @@ inline PrimExpr MergeMulMod(arith::Analyzer* analyzer, const PrimExpr& base) { simplified_base = analyzer->Simplify(simplified_base); std::vector eles = ExprSplitAddition(simplified_base); std::list mult_exprs; - std::list > mod_exprs; + std::list> mod_exprs; PrimExpr no_opt_sum; bool has_mult; bool has_mod; MergeMulModInsertElements(eles, &mult_exprs, &mod_exprs, &no_opt_sum, &has_mult, &has_mod); bool find_opt = false; - std::list >::iterator search_mod_it = mod_exprs.begin(); + std::list>::iterator search_mod_it = mod_exprs.begin(); // 2. Exhaustive Search while (search_mod_it != mod_exprs.end()) { std::list::iterator mult_it = mult_exprs.begin(); @@ -238,7 +238,7 @@ inline PrimExpr MergeMulMod(arith::Analyzer* analyzer, const PrimExpr& base) { for (std::list::iterator it = mult_exprs.begin(); it != mult_exprs.end(); ++it) { no_opt_sum = no_opt_sum.get() ? no_opt_sum + *it : *it; } - for (std::list >::iterator it = mod_exprs.begin(); + for (std::list>::iterator it = mod_exprs.begin(); it != mod_exprs.end(); ++it) { no_opt_sum = no_opt_sum.get() ? 
no_opt_sum + indexmod(it->first, it->second) : indexmod(it->first, it->second); diff --git a/src/tir/transforms/coproc_sync.cc b/src/tir/transforms/coproc_sync.cc index f3a9f990599f..1b1cabeadb71 100644 --- a/src/tir/transforms/coproc_sync.cc +++ b/src/tir/transforms/coproc_sync.cc @@ -111,7 +111,7 @@ class CoProcSyncPlanner : public StorageAccessVisitor { } // Write synchronization to be inserted before or after stmt. - std::unordered_map > sync_; + std::unordered_map> sync_; protected: bool Enabled(const VarNode* buf, const StorageScope& scope) const final { @@ -230,8 +230,8 @@ class CoProcBarrierDetector : public StorageAccessVisitor { PlanWriteBarrier(scope_.back(), nullptr); } - std::unordered_map > barrier_before_; - std::unordered_map > barrier_after_; + std::unordered_map> barrier_before_; + std::unordered_map> barrier_after_; protected: bool Enabled(const VarNode* buf, const StorageScope& scope) const final { @@ -251,7 +251,7 @@ class CoProcBarrierDetector : public StorageAccessVisitor { // Plan write barrier at Read after write point. std::vector PlanWriteBarrier(std::vector seq, const ForNode* loop) { std::vector read_seq; - std::unordered_map > write_set; + std::unordered_map> write_set; auto fupdate = [&](size_t i, const AccessEntry& acc) { auto it = write_set.find(acc.buffer.get()); @@ -289,7 +289,7 @@ class CoProcBarrierDetector : public StorageAccessVisitor { std::vector PlanReadBarrier(std::vector seq, const ForNode* loop) { std::vector write_seq; - std::unordered_map > read_set; + std::unordered_map> read_set; auto fupdate = [&](size_t i, const AccessEntry& acc) { auto it = read_set.find(acc.buffer.get()); @@ -443,8 +443,8 @@ class CoProcInstDepDetector : public StmtVisitor { // insert before is stored in reverse order // the first element is closest to the node. - std::unordered_map > insert_before_; - std::unordered_map > insert_after_; + std::unordered_map> insert_before_; + std::unordered_map> insert_after_; private: // state in the sync entry @@ -456,9 +456,9 @@ class CoProcInstDepDetector : public StmtVisitor { // Set of all possible contexts in the exit moment. std::unordered_set exit_ctx; // existing pop performed at enter - std::vector > enter_pop; + std::vector> enter_pop; // existing push performed at exit - std::vector > exit_push; + std::vector> exit_push; // clear the state void clear() { node = nullptr; @@ -473,8 +473,8 @@ class CoProcInstDepDetector : public StmtVisitor { // return the push/pop message at enter/exit of the Block // after considering the existing unmatcheded events and added events void InjectSync(const SyncState& prev, const SyncState& next, - std::vector >* prev_exit_push, - std::vector >* next_enter_pop) { + std::vector>* prev_exit_push, + std::vector>* next_enter_pop) { prev_exit_push->clear(); next_enter_pop->clear(); // quick path @@ -491,9 +491,9 @@ class CoProcInstDepDetector : public StmtVisitor { return; } // complicate path. 
- std::vector > vpush = prev.exit_push; - std::vector > vpop = next.enter_pop; - std::vector > pending; + std::vector> vpush = prev.exit_push; + std::vector> vpop = next.enter_pop; + std::vector> pending; for (int from : prev.exit_ctx) { for (int to : next.enter_ctx) { if (from != to) { @@ -556,7 +556,7 @@ class CoProcInstDepDetector : public StmtVisitor { void UpdateState() { if (last_state_.node != nullptr) { - std::vector > t1, t2; + std::vector> t1, t2; InjectSync(last_state_, curr_state_, &t1, &t2); std::swap(last_state_, curr_state_); } else { @@ -642,8 +642,8 @@ class CoProcSyncInserter : public StmtMutator { private: // insert before is stored in reverse order // the first element is closest to the node. - std::unordered_map > insert_before_; - std::unordered_map > insert_after_; + std::unordered_map> insert_before_; + std::unordered_map> insert_after_; }; Stmt CoProcSync(Stmt stmt) { return CoProcSyncInserter().Insert(std::move(stmt)); } diff --git a/src/tir/transforms/inject_double_buffer.cc b/src/tir/transforms/inject_double_buffer.cc index 03f2ccd40dd1..d974e3c8108a 100644 --- a/src/tir/transforms/inject_double_buffer.cc +++ b/src/tir/transforms/inject_double_buffer.cc @@ -299,9 +299,9 @@ class DoubleBufferInjector : public StmtExprMutator { // The current loop next std::vector loop_nest_; // The allocs to be appended before the loop - std::unordered_map > loop_allocs_; + std::unordered_map> loop_allocs_; // The stmt to be appended before the loop - std::unordered_map > loop_pre_; + std::unordered_map> loop_pre_; // The allocation size of the buffer std::unordered_map dbuffer_info_; // The updated Buffer objects diff --git a/src/tir/transforms/inject_virtual_thread.cc b/src/tir/transforms/inject_virtual_thread.cc index 83722d7b8aab..455140c75c13 100644 --- a/src/tir/transforms/inject_virtual_thread.cc +++ b/src/tir/transforms/inject_virtual_thread.cc @@ -177,7 +177,7 @@ class VarTouchedAnalysis : public StmtVisitor { // Whether variable is touched by the thread variable. std::unordered_set touched_var_; // x -> all the buffers x read from - std::unordered_map > affect_; + std::unordered_map> affect_; }; // Inject virtual thread loop diff --git a/src/tir/transforms/ir_utils.h b/src/tir/transforms/ir_utils.h index a54eebe4ed05..6915a0e3acc9 100644 --- a/src/tir/transforms/ir_utils.h +++ b/src/tir/transforms/ir_utils.h @@ -54,7 +54,7 @@ Stmt MergeNest(const std::vector& nest, Stmt body); * \param body body * \return The combined Stmt */ -Stmt MergeNest(const std::vector >& nest, Stmt body); +Stmt MergeNest(const std::vector>& nest, Stmt body); /*! * \brief update array with an unary function diff --git a/src/tir/transforms/make_packed_api.cc b/src/tir/transforms/make_packed_api.cc index 35c96e4fe4e1..4f8ad1223cd2 100644 --- a/src/tir/transforms/make_packed_api.cc +++ b/src/tir/transforms/make_packed_api.cc @@ -204,8 +204,8 @@ PrimFunc MakePackedAPI(PrimFunc&& func, int num_unpacked_args) { } // Need to re-declare vars, in case some arguments also appears in the buffer. 
- std::vector > var_def; - std::vector > buffer_def; + std::vector> var_def; + std::vector> buffer_def; for (int i = 0; i < static_cast(func_ptr->params.size()); ++i) { Var param = func_ptr->params[i]; @@ -343,7 +343,7 @@ Pass MakePackedAPI(int num_unpacked_args) { // packed arguments anyway while `num_unpacked_args` is -1 auto pass_func = [num_unpacked_args](IRModule m, PassContext ctx) { IRModuleNode* mptr = m.CopyOnWrite(); - std::vector > updates; + std::vector> updates; for (const auto& kv : mptr->functions) { if (auto* n = kv.second.as()) { diff --git a/src/tir/transforms/storage_access.h b/src/tir/transforms/storage_access.h index a48ee73f17fc..ac64e2f5cb65 100644 --- a/src/tir/transforms/storage_access.h +++ b/src/tir/transforms/storage_access.h @@ -125,7 +125,7 @@ class StorageAccessVisitor : public StmtExprVisitor { */ StorageScope GetScope(Var buffer_var) const; // access scope - std::vector > scope_; + std::vector> scope_; private: // whether access appending is enabled. diff --git a/src/tir/transforms/storage_rewrite.cc b/src/tir/transforms/storage_rewrite.cc index acb052650036..177017f9a245 100644 --- a/src/tir/transforms/storage_rewrite.cc +++ b/src/tir/transforms/storage_rewrite.cc @@ -1010,11 +1010,11 @@ class StoragePlanRewriter : public StmtExprMutator { // symbolic free list, for non constant items. std::list sym_free_list_; // The allocation attach map - std::unordered_map > attach_map_; + std::unordered_map> attach_map_; // The allocation assign map std::unordered_map alloc_map_; // The allocations - std::vector > alloc_vec_; + std::vector> alloc_vec_; // The buffer objects being remapped std::unordered_map buffer_remap_; // analyzer