Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[mlir][sparse] code formatting (NFC) #74779

Merged
merged 1 commit into from
Dec 7, 2023
Merged

[mlir][sparse] code formatting (NFC) #74779

merged 1 commit into from
Dec 7, 2023

Conversation

aartbik
Copy link
Contributor

@aartbik aartbik commented Dec 7, 2023

No description provided.

@llvmbot llvmbot added mlir:sparse Sparse compiler in MLIR mlir labels Dec 7, 2023
@llvmbot
Copy link
Member

llvmbot commented Dec 7, 2023

@llvm/pr-subscribers-mlir-sparse

@llvm/pr-subscribers-mlir

Author: Aart Bik (aartbik)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/74779.diff

2 Files Affected:

  • (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir (+6-6)
  • (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir (+7-9)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index 7825e8fe9bafa..6c35e2b51ed8f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -41,8 +41,8 @@
 module {
 
   func.func @conv2d(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>,
-               %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+                    %filter: tensor<3x3xi32>,
+                    %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
       outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -50,7 +50,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
-               %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
+                                    %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
@@ -59,7 +59,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
-               %filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
+                                   %filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
     %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32, #CSR>)
@@ -68,7 +68,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
-               %filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
+                                  %filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
     %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32, #CDR>)
@@ -77,7 +77,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
-               %filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
+                                   %filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
     %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32, #CSC>)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
index 80946f5388520..f2907db7d825b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
@@ -46,8 +46,8 @@
 module {
 
   func.func @conv2d(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>,
-               %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+                    %filter: tensor<3x3xi32>,
+                    %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
       outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -70,7 +70,7 @@ module {
   }
 
   func.func @conv2d_sparse_out(%input:  tensor<8x8xi32>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+                               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
@@ -79,7 +79,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+                                    %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
     %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32>)
@@ -88,7 +88,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
+                                   %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
     %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32>)
@@ -97,7 +97,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
+                                  %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
     %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32>)
@@ -106,7 +106,7 @@ module {
   }
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
-               %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
+                                   %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
     %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32>)
@@ -125,7 +125,6 @@ module {
       [ -1,  0,  1 ]
     ]> : tensor<3x3xi32>
 
-
     %input = arith.constant dense<[
       [  1,  2,  3,  4,  0,  6,  7,  8 ],
       [  2,  2,  4,  4,  0,  0,  6,  8 ],
@@ -270,7 +269,6 @@ module {
       : tensor<6x6xi32>, vector<6x6xi32>
     vector.print %v : vector<6x6xi32>
 
-
     // Release the resources.
     bufferization.dealloc_tensor %sparse_input_DCSR : tensor<8x8xi32, #DCSR>
     bufferization.dealloc_tensor %sparse_input_CSR : tensor<8x8xi32, #CSR>

@aartbik aartbik merged commit 7003e25 into llvm:main Dec 7, 2023
@aartbik aartbik deleted the bik branch December 7, 2023 23:46
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
mlir:sparse Sparse compiler in MLIR mlir
Projects
None yet
Development

Successfully merging this pull request may close these issues.

3 participants