From 03529b99b36788ca836b7ce238ea9400ce89847b Mon Sep 17 00:00:00 2001
From: Han-Chung Wang
Date: Mon, 6 Nov 2023 15:35:50 -0800
Subject: [PATCH] [mlir][linalg] Add support for vectorizing dynamic elementwise named ops (#71454)

We are already able to vectorize these ops in their linalg.generic form;
we just need to relax the precondition so the named-op forms can be
vectorized as well.
---
 .../Linalg/Transforms/Vectorization.cpp     |  8 ++++--
 mlir/test/Dialect/Linalg/vectorization.mlir | 28 +++++++++++++++++++
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index b427af33e3c440..b8d82159856825 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1465,9 +1465,11 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
 }
 
 static LogicalResult vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op) {
-  // TODO: Masking only supports dynamic generic ops for now.
-  if (!isa<linalg::GenericOp>(op.getOperation()))
+  // TODO: Masking only supports dynamic element-wise ops, linalg.generic ops,
+  // linalg.copy ops and ops that implement ContractionOpInterface for now.
+  if (!isElementwise(op) &&
+      !isa<linalg::GenericOp, linalg::CopyOp, linalg::ContractionOpInterface>(
+          op.getOperation()))
     return failure();
 
   LDBG("Dynamically-shaped op meets vectorization pre-conditions\n");
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 7f4af344886f49..610339405d1c2c 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -368,6 +368,34 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// CHECK: #[[MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
+// CHECK: func @test_masked_vectorize_linalg_transpose
+func.func @test_masked_vectorize_linalg_transpose(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // CHECK: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[D0:.*]] = tensor.dim %arg0, %[[C0]]
+  // CHECK: %[[C1:.*]] = arith.constant 1 : index
+  // CHECK: %[[D1:.*]] = tensor.dim %arg0, %[[C1]]
+  // CHECK: %[[MASK0:.*]] = vector.create_mask %[[D0]], %[[D1]]
+  // CHECK: %[[LOAD:.*]] = vector.mask %[[MASK0]] { vector.transfer_read %arg0{{.+}} }
+  // CHECK-SAME: vector<2x4xi1> -> vector<2x4xf32>
+  // CHECK: %[[MASK1:.*]] = vector.create_mask %[[D1]], %[[D0]]
+  // CHECK: %[[WRITE:.*]] = vector.mask %[[MASK1]] { vector.transfer_write %[[LOAD]], %arg1{{.+}} permutation_map = #[[MAP]]{{.+}} }
+  // CHECK-SAME: vector<4x2xi1> -> tensor<?x?xf32>
+  // CHECK: return %[[WRITE]]
+  %0 = linalg.transpose ins(%arg0 : tensor<?x?xf32>) outs(%arg1 : tensor<?x?xf32>) permutation = [1, 0]
+  return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+    transform.yield
+  }
+}
+
+// -----
+
 // CHECK-LABEL: func @test_masked_vectorize_linalg_copy
 func.func @test_masked_vectorize_linalg_copy(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
   // CHECK: %[[c0:.*]] = arith.constant 0 : index
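
For illustration only (not part of the patch): a minimal sketch of the kind of
input the relaxed precondition newly admits. It assumes the
linalg.elemwise_binary named op on dynamically shaped tensors; the function
name, operand names, and vector sizes below are made up for this example. With
the change, masked vectorization driven by transform.structured.vectorize with
explicit vector_sizes applies to such named elementwise ops directly, without
first rewriting them as linalg.generic.

// Hypothetical input, mirroring the style of the tests above: a dynamically
// shaped elementwise *named* op (no linalg.generic wrapper).
func.func @masked_vectorize_dynamic_elemwise_add(%lhs: tensor<?x?xf32>, %rhs: tensor<?x?xf32>,
                                                 %init: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
         ins(%lhs, %rhs : tensor<?x?xf32>, tensor<?x?xf32>)
         outs(%init : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    // Masked vectorization with explicit vector sizes; before this change the
    // dynamic-shape precondition rejected elementwise named ops like this one.
    %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
    transform.yield
  }
}

As in the tests above, the vector sizes give the static vector shape, while
vector.create_mask built from the tensor.dim values masks off the lanes that
fall outside the dynamic extents.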