From ba6d1ecf953172b41a1d3f8a35a30b7df97a67e7 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Wed, 17 Apr 2024 18:40:54 +0000 Subject: [PATCH 1/9] Make createReadOrMaskedRead a utility Made the createReadOrMaskedRead a utility function - to be accessible outside of the CU. Needed by the IREE new TopK implementation. --- .../Dialect/Linalg/Transforms/Transforms.h | 6 +++ .../Dialect/Linalg/Transforms/Transforms.cpp | 29 ++++++++++++++ .../Linalg/Transforms/Vectorization.cpp | 40 ------------------- 3 files changed, 35 insertions(+), 40 deletions(-) diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index feb3b3f03cf53..f4c56b671e9d7 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1616,6 +1616,12 @@ void populateSplitReductionPattern( const ControlSplitReductionFn &controlSplitReductionFn, bool useAlloc = false); +/// Create a TransferReadOp from `source` with static shape `readShape`. If the +/// vector type for the read is not the same as the type of `source`, then a +/// mask is created on the read. 
+Value createReadOrMaskedRead(OpBuilder &builder, Location loc, + Value source, ArrayRef readShape, + Value padValue); } // namespace linalg } // namespace mlir diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index a17bc8e4cd318..b32ebfc380fcf 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -1593,3 +1593,32 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns, DownscaleSizeOneWindowed2DConvolution>( patterns.getContext(), benefit); } + +Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc, + Value source, ArrayRef readShape, + Value padValue) { + assert(llvm::none_of(readShape, + [](int64_t s) { return s == ShapedType::kDynamic; })); + auto sourceShape = dyn_cast(source.getType()).getShape(); + assert(sourceShape.size() == readShape.size()); + auto maskType = VectorType::get(readShape, builder.getI1Type()); + auto vectorType = VectorType::get(readShape, padValue.getType()); + int64_t readRank = readShape.size(); + auto zero = builder.create(loc, 0); + auto transferReadOp = builder.create( + loc, + /*vectorType=*/vectorType, + /*source=*/source, + /*indices=*/SmallVector(readRank, zero), + /*padding=*/padValue, + /*inBounds=*/SmallVector(readRank, true)); + if (llvm::equal(readShape, sourceShape)) { + return transferReadOp; + } + SmallVector mixedSourceDims = + tensor::getMixedSizes(builder, loc, source); + Value mask = + builder.create(loc, maskType, mixedSourceDims); + return mlir::vector::maskOperation(builder, transferReadOp, mask) + ->getResult(0); +} diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index df61381432921..e2ca5e1437728 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1410,46 +1410,6 @@ static SmallVector 
getTiledPackShape(tensor::PackOp packOp, return applyPermutation(destShape, tensor::getPackInverseDestPerm(packOp)); } -/// Create a TransferReadOp from `source` with static shape `readShape`. If the -/// vector type for the read is not the same as the type of `source`, then a -/// mask is created on the read. If `doMasking` parameter is set to false we -/// update the `inBounds` attribute instead of masking. -static Value createReadOrMaskedRead(OpBuilder &builder, Location loc, - Value source, ArrayRef readShape, - Value padValue, bool doMasking = true) { - assert(llvm::none_of(readShape, - [](int64_t s) { return s == ShapedType::kDynamic; })); - auto sourceShape = dyn_cast(source.getType()).getShape(); - assert(sourceShape.size() == readShape.size()); - auto maskType = VectorType::get(readShape, builder.getI1Type()); - auto vectorType = VectorType::get(readShape, padValue.getType()); - int64_t readRank = readShape.size(); - auto zero = builder.create(loc, 0); - SmallVector inBoundsVal(readRank, true); - if (!doMasking) { - // Update the inBounds attribute. - for (unsigned i = 0; i < readRank; i++) - inBoundsVal[i] = sourceShape[i] == readShape[i]; - } - auto transferReadOp = builder.create( - loc, - /*vectorType=*/vectorType, - /*source=*/source, - /*indices=*/SmallVector(readRank, zero), - /*padding=*/padValue, - /*inBounds=*/inBoundsVal); - - if (llvm::equal(readShape, sourceShape) || !doMasking) { - return transferReadOp; - } - SmallVector mixedSourceDims = - tensor::getMixedSizes(builder, loc, source); - Value mask = - builder.create(loc, maskType, mixedSourceDims); - return mlir::vector::maskOperation(builder, transferReadOp, mask) - ->getResult(0); -} - /// Given an input, the mixed destSizes, and the vector sizes for vectorization, /// create an empty destination tensor and create a TransferWriteOp from the /// input to the empty tensor. 
If the destination shape is not the same as the From 10823adeb62c0c42b63d9e66706ead84bb0fb534 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Wed, 17 Apr 2024 18:48:13 +0000 Subject: [PATCH 2/9] Formatting fixes. --- mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h | 5 ++--- mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp | 5 +++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index f4c56b671e9d7..a8175c9877677 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1619,9 +1619,8 @@ void populateSplitReductionPattern( /// Create a TransferReadOp from `source` with static shape `readShape`. If the /// vector type for the read is not the same as the type of `source`, then a /// mask is created on the read. -Value createReadOrMaskedRead(OpBuilder &builder, Location loc, - Value source, ArrayRef readShape, - Value padValue); +Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, + ArrayRef readShape, Value padValue); } // namespace linalg } // namespace mlir diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index b32ebfc380fcf..ebc7933f7fd35 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -1595,8 +1595,9 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns, } Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc, - Value source, ArrayRef readShape, - Value padValue) { + Value source, + ArrayRef readShape, + Value padValue) { assert(llvm::none_of(readShape, [](int64_t s) { return s == ShapedType::kDynamic; })); auto sourceShape = dyn_cast(source.getType()).getShape(); From 0898983570b7458ad8ed22065d37a5d9592954fd Mon Sep 17 00:00:00 2001 
From: Lubo Litchev Date: Wed, 17 Apr 2024 21:21:10 +0000 Subject: [PATCH 3/9] Merge of the latest. --- .../mlir/Dialect/Linalg/Transforms/Transforms.h | 3 ++- mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index a8175c9877677..db6b23c589494 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1620,7 +1620,8 @@ void populateSplitReductionPattern( /// vector type for the read is not the same as the type of `source`, then a /// mask is created on the read. Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, - ArrayRef readShape, Value padValue); + ArrayRef readShape, Value padValue, + bool doMasking = true); } // namespace linalg } // namespace mlir diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index ebc7933f7fd35..b4d70c464e126 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -1597,7 +1597,7 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns, Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef readShape, - Value padValue) { + Value padValue, bool doMasking) { assert(llvm::none_of(readShape, [](int64_t s) { return s == ShapedType::kDynamic; })); auto sourceShape = dyn_cast(source.getType()).getShape(); @@ -1606,14 +1606,21 @@ Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc, auto vectorType = VectorType::get(readShape, padValue.getType()); int64_t readRank = readShape.size(); auto zero = builder.create(loc, 0); + SmallVector inBoundsVal(readRank, true); + if (!doMasking) { + // Update the inBounds attribute. 
+ for (unsigned i = 0; i < readRank; i++) + inBoundsVal[i] = sourceShape[i] == readShape[i]; + } auto transferReadOp = builder.create( loc, /*vectorType=*/vectorType, /*source=*/source, /*indices=*/SmallVector(readRank, zero), /*padding=*/padValue, - /*inBounds=*/SmallVector(readRank, true)); - if (llvm::equal(readShape, sourceShape)) { + /*inBounds=*/inBoundsVal); + + if (llvm::equal(readShape, sourceShape) || !doMasking) { return transferReadOp; } SmallVector mixedSourceDims = From d31a4cf82132ef3a9df95d343e031f63b305e077 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Fri, 19 Apr 2024 14:46:46 +0000 Subject: [PATCH 4/9] Addressed CR requests. Moved code around and other CR. --- .../Dialect/Linalg/Transforms/Transforms.h | 6 -- .../mlir/Dialect/Vector/Utils/VectorUtils.h | 17 +++++ .../Dialect/Linalg/Transforms/Transforms.cpp | 37 ---------- .../Linalg/Transforms/Vectorization.cpp | 50 +++----------- mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 68 +++++++++++++++++++ 5 files changed, 93 insertions(+), 85 deletions(-) diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index db6b23c589494..feb3b3f03cf53 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1616,12 +1616,6 @@ void populateSplitReductionPattern( const ControlSplitReductionFn &controlSplitReductionFn, bool useAlloc = false); -/// Create a TransferReadOp from `source` with static shape `readShape`. If the -/// vector type for the read is not the same as the type of `source`, then a -/// mask is created on the read. 
-Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, - ArrayRef readShape, Value padValue, - bool doMasking = true); } // namespace linalg } // namespace mlir diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index f88fbdf9e6276..f7bea0c25813c 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -180,6 +180,23 @@ struct MaskableOpRewritePattern : OpRewritePattern { /// are not linearizable. bool isLinearizableVector(VectorType type); +/// Create a TransferReadOp from `source` with static shape `readShape`. If the +/// vector type for the read is not the same as the type of `source`, then a +/// mask is created on the read. +/// enableMasking if false, the inBoundsVal values are set properly, based on +/// the rank dimensions of the source and destination tensors. +Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, + ArrayRef readShape, Value padValue, + bool enableMasking = true); + +/// Returns success if `inputVectorSizes` is a valid masking configuraion for +/// given `shape`, i.e., it meets: +/// 1. The numbers of elements in both array are equal. +/// 2. `inputVectorSizes` does not have dynamic dimensions. +/// 3. All the values in `inputVectorSizes` are greater than or equal to +/// static sizes in `shape`. 
+LogicalResult isValidMaskedInputVector(ArrayRef shape, + ArrayRef inputVectorSizes); } // namespace vector /// Constructs a permutation map of invariant memref indices to vector diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index b4d70c464e126..a17bc8e4cd318 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -1593,40 +1593,3 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns, DownscaleSizeOneWindowed2DConvolution>( patterns.getContext(), benefit); } - -Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc, - Value source, - ArrayRef readShape, - Value padValue, bool doMasking) { - assert(llvm::none_of(readShape, - [](int64_t s) { return s == ShapedType::kDynamic; })); - auto sourceShape = dyn_cast(source.getType()).getShape(); - assert(sourceShape.size() == readShape.size()); - auto maskType = VectorType::get(readShape, builder.getI1Type()); - auto vectorType = VectorType::get(readShape, padValue.getType()); - int64_t readRank = readShape.size(); - auto zero = builder.create(loc, 0); - SmallVector inBoundsVal(readRank, true); - if (!doMasking) { - // Update the inBounds attribute. 
- for (unsigned i = 0; i < readRank; i++) - inBoundsVal[i] = sourceShape[i] == readShape[i]; - } - auto transferReadOp = builder.create( - loc, - /*vectorType=*/vectorType, - /*source=*/source, - /*indices=*/SmallVector(readRank, zero), - /*padding=*/padValue, - /*inBounds=*/inBoundsVal); - - if (llvm::equal(readShape, sourceShape) || !doMasking) { - return transferReadOp; - } - SmallVector mixedSourceDims = - tensor::getMixedSizes(builder, loc, source); - Value mask = - builder.create(loc, maskType, mixedSourceDims); - return mlir::vector::maskOperation(builder, transferReadOp, mask) - ->getResult(0); -} diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index e2ca5e1437728..4b5e19cb4a4c3 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1667,8 +1667,8 @@ vectorizeAsTensorPadOp(RewriterBase &rewriter, tensor::PadOp padOp, .reifyResultShapes(rewriter, reifiedReturnShapes); (void)status; // prevent unused variable warning on non-assert builds assert(succeeded(status) && "failed to reify result shapes"); - auto maskedRead = createReadOrMaskedRead(rewriter, loc, padOp.getSource(), - inputVectorSizes, padValue); + auto maskedRead = vector::createReadOrMaskedRead( + rewriter, loc, padOp.getSource(), inputVectorSizes, padValue); Operation *write = createWriteOrMaskedWrite( rewriter, loc, maskedRead, reifiedReturnShapes[0], inputVectorSizes); newResults.push_back(write->getResult(0)); @@ -1741,41 +1741,6 @@ vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op, return success(); } -/// Returns success if `inputVectorSizes` is a valid masking configuraion for -/// given `shape`, i.e., it meets: -/// 1. The numbers of elements in both array are equal. -/// 2. `inputVectorSizes` does not have dynamic dimensions. -/// 3. All the values in `inputVectorSizes` are greater than or equal to -/// static sizes in `shape`. 
-static LogicalResult -isValidMaskedInputVector(ArrayRef shape, - ArrayRef inputVectorSizes) { - LDBG("Iteration space static sizes:"); - LLVM_DEBUG(llvm::interleaveComma(shape, llvm::dbgs())); - LLVM_DEBUG(llvm::dbgs() << "\n"); - - if (inputVectorSizes.size() != shape.size()) { - LDBG("Input vector sizes don't match the number of loops"); - return failure(); - } - if (ShapedType::isDynamicShape(inputVectorSizes)) { - LDBG("Input vector sizes can't have dynamic dimensions"); - return failure(); - } - if (!llvm::all_of(llvm::zip(shape, inputVectorSizes), - [](std::tuple sizePair) { - int64_t staticSize = std::get<0>(sizePair); - int64_t inputSize = std::get<1>(sizePair); - return ShapedType::isDynamic(staticSize) || - staticSize <= inputSize; - })) { - LDBG("Input vector sizes must be greater than or equal to iteration space " - "static sizes"); - return failure(); - } - return success(); -} - /// Need to check if the inner-tiles are static/constant. static LogicalResult vectorizeUnPackOpPrecondition(tensor::UnPackOp unpackOp, @@ -1789,7 +1754,7 @@ vectorizeUnPackOpPrecondition(tensor::UnPackOp unpackOp, } llvm::ArrayRef resultShape = unpackOp.getDestType().getShape(); if (!inputVectorSizes.empty() && - failed(isValidMaskedInputVector(resultShape, inputVectorSizes))) + failed(vector::isValidMaskedInputVector(resultShape, inputVectorSizes))) return failure(); return success(); @@ -1803,8 +1768,8 @@ static LogicalResult vectorizeLinalgOpPrecondition( return failure(); // Check API contract for input vector sizes. 
if (!inputVectorSizes.empty() && - failed(isValidMaskedInputVector(linalgOp.getStaticLoopRanges(), - inputVectorSizes))) + failed(vector::isValidMaskedInputVector(linalgOp.getStaticLoopRanges(), + inputVectorSizes))) return failure(); if (linalgOp.hasDynamicShape() && failed(vectorizeDynamicLinalgOpPrecondition( @@ -1880,7 +1845,7 @@ vectorizePackOpPrecondition(tensor::PackOp packOp, } if (!satisfyEmptyCond && - failed(isValidMaskedInputVector( + failed(vector::isValidMaskedInputVector( resultTensorShape.take_front(packOp.getSourceRank()), inputVectorSizes))) return failure(); @@ -1905,7 +1870,8 @@ vectorizePadOpPrecondition(tensor::PadOp padOp, } ArrayRef resultTensorShape = padOp.getResultType().getShape(); - if (failed(isValidMaskedInputVector(resultTensorShape, inputVectorSizes))) + if (failed(vector::isValidMaskedInputVector(resultTensorShape, + inputVectorSizes))) return failure(); if (llvm::any_of(padOp.getLow(), [](Value v) { diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index ebc6f5cbcaa9e..3057ac5d2f97b 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -322,3 +322,71 @@ bool vector::isLinearizableVector(VectorType type) { auto numScalableDims = llvm::count(type.getScalableDims(), true); return (type.getRank() > 1) && (numScalableDims <= 1); } + +Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, + Value source, ArrayRef readShape, + Value padValue, bool enableMasking) { + assert(llvm::none_of(readShape, + [](int64_t s) { return s == ShapedType::kDynamic; }) && + "expected static shape"); + auto sourceShape = cast(source.getType()).getShape(); + assert(sourceShape.size() == readShape.size() && "expected same ranks."); + auto maskType = VectorType::get(readShape, builder.getI1Type()); + auto vectorType = VectorType::get(readShape, padValue.getType()); + assert(padValue.getType() == sourceShape.getElementType() && + 
"expected same pad element type to match source element type") + int64_t readRank = readShape.size(); + auto zero = builder.create(loc, 0); + SmallVector inBoundsVal(readRank, true); + if (!enableMasking) { + // Update the inBounds attribute. + for (unsigned i = 0; i < readRank; i++) + inBoundsVal[i] = (sourceShape[i] == readShape[i]) && + !ShapedType::isDynamic(sourceShape[] i); + } + auto transferReadOp = builder.create( + loc, + /*vectorType=*/vectorType, + /*source=*/source, + /*indices=*/SmallVector(readRank, zero), + /*padding=*/padValue, + /*inBounds=*/inBoundsVal); + + if (llvm::equal(readShape, sourceShape) || !doMasking) + return transferReadOp; + SmallVector mixedSourceDims = + tensor::getMixedSizes(builder, loc, source); + Value mask = + builder.create(loc, maskType, mixedSourceDims); + return mlir::vector::maskOperation(builder, transferReadOp, mask) + ->getResult(0); +} + +LogicalResult +vector::isValidMaskedInputVector(ArrayRef shape, + ArrayRef inputVectorSizes) { + LDBG("Iteration space static sizes:"); + LLVM_DEBUG(llvm::interleaveComma(shape, llvm::dbgs())); + LLVM_DEBUG(llvm::dbgs() << "\n"); + + if (inputVectorSizes.size() != shape.size()) { + LDBG("Input vector sizes don't match the number of loops"); + return failure(); + } + if (ShapedType::isDynamicShape(inputVectorSizes)) { + LDBG("Input vector sizes can't have dynamic dimensions"); + return failure(); + } + if (!llvm::all_of(llvm::zip(shape, inputVectorSizes), + [](std::tuple sizePair) { + int64_t staticSize = std::get<0>(sizePair); + int64_t inputSize = std::get<1>(sizePair); + return ShapedType::isDynamic(staticSize) || + staticSize <= inputSize; + })) { + LDBG("Input vector sizes must be greater than or equal to iteration space " + "static sizes"); + return failure(); + } + return success(); +} From 1897a650dcf5b428c81278e135729313c7b1c869 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Fri, 19 Apr 2024 17:17:35 +0000 Subject: [PATCH 5/9] Fixed some syntax errors, comments and other 
CRF. --- .../mlir/Dialect/Vector/Utils/VectorUtils.h | 12 +++++++++--- .../Linalg/Transforms/Vectorization.cpp | 6 +++--- mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 18 ++++++++++++------ 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index f7bea0c25813c..0fb8a2591e0bf 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -182,9 +182,15 @@ bool isLinearizableVector(VectorType type); /// Create a TransferReadOp from `source` with static shape `readShape`. If the /// vector type for the read is not the same as the type of `source`, then a -/// mask is created on the read. -/// enableMasking if false, the inBoundsVal values are set properly, based on -/// the rank dimensions of the source and destination tensors. +/// mask is created on the read, if use of mask is specified or the bounds on a +/// dimension are different. +/// +/// `enableMasking` if false, the inBoundsVal values are set properly, based on +/// the rank dimensions of the source and destination tensors. And that is +/// what determines if masking is done. +/// +/// Note that the internal `vector::TransferReadOp` always read at indices zero +/// for each dimension of the passed in tensor. 
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef readShape, Value padValue, bool enableMasking = true); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index 4b5e19cb4a4c3..30df53c2658df 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1516,8 +1516,8 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, tensor::PackOp packOp, invertPermutationVector(outerDimsPerm)); for (auto [idx, size] : enumerate(innerTiles)) inputShape[innerDimsPos[idx]] *= size; - auto maskedRead = createReadOrMaskedRead(rewriter, loc, packOp.getSource(), - inputShape, padValue, doMasking); + auto maskedRead = vector::createReadOrMaskedRead( + rewriter, loc, packOp.getSource(), inputShape, padValue, doMasking); // Create ShapeCastOp. SmallVector destShape(inputVectorSizes); @@ -1609,7 +1609,7 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, tensor::UnPackOp unpackOp, // Read result, mask if necessary. If transferReadOp shape is not equal // to shape of source, then a mask is necessary. 
- Value readResult = createReadOrMaskedRead( + Value readResult = vector::createReadOrMaskedRead( rewriter, loc, unpackOp.getSource(), ArrayRef(readMaskShape.begin(), readMaskShape.end()), padValue); diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index 3057ac5d2f97b..7d60e2a7f3e75 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -30,6 +30,11 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" +#define DEBUG_TYPE "vector-utils" + +#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ") +#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") + using namespace mlir; /// Helper function that creates a memref::DimOp or tensor::DimOp depending on @@ -329,20 +334,21 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, assert(llvm::none_of(readShape, [](int64_t s) { return s == ShapedType::kDynamic; }) && "expected static shape"); - auto sourceShape = cast(source.getType()).getShape(); + auto sourceShapedType = cast(source.getType()); + auto sourceShape = sourceShapedType.getShape(); assert(sourceShape.size() == readShape.size() && "expected same ranks."); auto maskType = VectorType::get(readShape, builder.getI1Type()); auto vectorType = VectorType::get(readShape, padValue.getType()); - assert(padValue.getType() == sourceShape.getElementType() && - "expected same pad element type to match source element type") - int64_t readRank = readShape.size(); + assert(padValue.getType() == sourceShapedType.getElementType() && + "expected same pad element type to match source element type"); + int64_t readRank = readShape.size(); auto zero = builder.create(loc, 0); SmallVector inBoundsVal(readRank, true); if (!enableMasking) { // Update the inBounds attribute. 
for (unsigned i = 0; i < readRank; i++) inBoundsVal[i] = (sourceShape[i] == readShape[i]) && - !ShapedType::isDynamic(sourceShape[] i); + !ShapedType::isDynamic(sourceShape[i]); } auto transferReadOp = builder.create( loc, @@ -352,7 +358,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, /*padding=*/padValue, /*inBounds=*/inBoundsVal); - if (llvm::equal(readShape, sourceShape) || !doMasking) + if (llvm::equal(readShape, sourceShape) || !enableMasking) return transferReadOp; SmallVector mixedSourceDims = tensor::getMixedSizes(builder, loc, source); From 31a0b27ccbed7afc1f8389ca23ae33fb9b8e49fb Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Fri, 19 Apr 2024 17:30:25 +0000 Subject: [PATCH 6/9] More CR addressed. --- mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h | 2 +- mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index 0fb8a2591e0bf..a56f2a78bdff0 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -193,7 +193,7 @@ bool isLinearizableVector(VectorType type); /// for each dimension of the passed in tensor. 
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef readShape, Value padValue, - bool enableMasking = true); + bool useInBoundsInsteadOfMasking = true); /// Returns success if `inputVectorSizes` is a valid masking configuraion for /// given `shape`, i.e., it meets: diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index 7d60e2a7f3e75..fcaf1ec944b47 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -330,7 +330,8 @@ bool vector::isLinearizableVector(VectorType type) { Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef readShape, - Value padValue, bool enableMasking) { + Value padValue, + bool useInBoundsInsteadOfMasking) { assert(llvm::none_of(readShape, [](int64_t s) { return s == ShapedType::kDynamic; }) && "expected static shape"); @@ -344,7 +345,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, int64_t readRank = readShape.size(); auto zero = builder.create(loc, 0); SmallVector inBoundsVal(readRank, true); - if (!enableMasking) { + if (!useInBoundsInsteadOfMasking) { // Update the inBounds attribute. for (unsigned i = 0; i < readRank; i++) inBoundsVal[i] = (sourceShape[i] == readShape[i]) && @@ -358,7 +359,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, /*padding=*/padValue, /*inBounds=*/inBoundsVal); - if (llvm::equal(readShape, sourceShape) || !enableMasking) + if (llvm::equal(readShape, sourceShape) || !useInBoundsInsteadOfMasking) return transferReadOp; SmallVector mixedSourceDims = tensor::getMixedSizes(builder, loc, source); From 01d1e46e5aede48d40d5e4eba1acaf144950ec96 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Fri, 19 Apr 2024 21:01:04 +0000 Subject: [PATCH 7/9] Fixed a missed comment update. 
--- mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index a56f2a78bdff0..8a57c6094c41c 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -185,7 +185,8 @@ bool isLinearizableVector(VectorType type); /// mask is created on the read, if use of mask is specified or the bounds on a /// dimension are different. /// -/// `enableMasking` if false, the inBoundsVal values are set properly, based on +/// `useInBoundsInsteadOfMasking` if false, the inBoundsVal values are set +/// properly, based on /// the rank dimensions of the source and destination tensors. And that is /// what determines if masking is done. /// From 0a064dfdb4825d619a861f0aa9092b63f08d8f64 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Mon, 22 Apr 2024 17:20:25 +0000 Subject: [PATCH 8/9] Renamed a var per CR feedback. --- mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index 30df53c2658df..11f7ac870b3cc 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1499,11 +1499,11 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, tensor::PackOp packOp, // If the input vector sizes are not provided, then the vector sizes are // determined by the result tensor shape. In case the vector sizes aren't // provided, we update the inBounds attribute instead of masking. 
- bool doMasking = true; + bool useInBoundsInsteadOfMasking = true; if (inputVectorSizes.empty()) { ArrayRef resultTensorShape = packOp.getDestType().getShape(); inputVectorSizes = resultTensorShape.take_front(packOp.getSourceRank()); - doMasking = false; + useInBoundsInsteadOfMasking = false; } // Create masked TransferReadOp. @@ -1517,7 +1517,8 @@ for (auto [idx, size] : enumerate(innerTiles)) inputShape[innerDimsPos[idx]] *= size; auto maskedRead = vector::createReadOrMaskedRead( - rewriter, loc, packOp.getSource(), inputShape, padValue, doMasking); + rewriter, loc, packOp.getSource(), inputShape, padValue, + useInBoundsInsteadOfMasking); // Create ShapeCastOp. SmallVector destShape(inputVectorSizes); From d27b69b0b38a73dcd5f1155ab1c720ac493e6bb0 Mon Sep 17 00:00:00 2001 From: Lubo Litchev Date: Thu, 25 Apr 2024 16:47:04 +0000 Subject: [PATCH 9/9] Update logic to reflect name of flag. The PR https://github.com/llvm/llvm-project/pull/89119 renamed a flag, which inverted its previous meaning, but the logic was not updated to reflect that. Fixed the logic to reflect the inversion of the meaning of the name. --- mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h | 6 +++--- mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp | 4 ++-- mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index 8a57c6094c41c..30e21c64c2731 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -185,8 +185,8 @@ bool isLinearizableVector(VectorType type); /// mask is created on the read, if use of mask is specified or the bounds on a /// dimension are different. 
/// -/// `useInBoundsInsteadOfMasking` if false, the inBoundsVal values are set -/// properly, based on +/// `useInBoundsInsteadOfMasking` if true, the inBoundsVal values are set +/// properly, based on /// the rank dimensions of the source and destination tensors. And that is /// what determines if masking is done. /// @@ -194,7 +194,7 @@ bool isLinearizableVector(VectorType type); /// for each dimension of the passed in tensor. Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef readShape, Value padValue, - bool useInBoundsInsteadOfMasking = true); + bool useInBoundsInsteadOfMasking = false); /// Returns success if `inputVectorSizes` is a valid masking configuraion for /// given `shape`, i.e., it meets: diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index e836f0dc63b4f..be1b602783fe9 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1499,11 +1499,11 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, tensor::PackOp packOp, // If the input vector sizes are not provided, then the vector sizes are // determined by the result tensor shape. In case the vector sizes aren't // provided, we update the inBounds attribute instead of masking. - bool useInBoundsInsteadOfMasking = true; + bool useInBoundsInsteadOfMasking = false; if (inputVectorSizes.empty()) { ArrayRef resultTensorShape = packOp.getDestType().getShape(); inputVectorSizes = resultTensorShape.take_front(packOp.getSourceRank()); - useInBoundsInsteadOfMasking = false; + useInBoundsInsteadOfMasking = true; } // Create masked TransferReadOp. 
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index fcaf1ec944b47..f9ddaa08a1910 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -345,7 +345,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, int64_t readRank = readShape.size(); auto zero = builder.create(loc, 0); SmallVector inBoundsVal(readRank, true); - if (!useInBoundsInsteadOfMasking) { + if (useInBoundsInsteadOfMasking) { // Update the inBounds attribute. for (unsigned i = 0; i < readRank; i++) inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&