// RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s

// CHECK-LABEL: func private @sparse_1d_tensor(
// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)

// -----
@@ -13,7 +13,7 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map
}>

// CHECK-LABEL: func private @sparse_csr(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>>)
func.func private @sparse_csr(tensor<?x?xf32, #CSR>)

// -----
@@ -23,7 +23,7 @@ func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
}>

// CHECK-LABEL: func private @CSR_explicit(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>
func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
  return
}
@@ -37,7 +37,7 @@ func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
}>

// CHECK-LABEL: func private @sparse_csc(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>>)
func.func private @sparse_csc(tensor<?x?xf32, #CSC>)

// -----
@@ -49,7 +49,7 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
}>

// CHECK-LABEL: func private @sparse_dcsc(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : compressed, d0 : compressed), crdWidth = 64 }>>)
func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)

// -----
@@ -59,7 +59,7 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
}>

// CHECK-LABEL: func private @sparse_coo(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered)) }>>)
func.func private @sparse_coo(tensor<?x?xf32, #COO>)

// -----
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
}>

// CHECK-LABEL: func private @sparse_bcoo(
// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton) }>>)
func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)

// -----
@@ -79,7 +79,7 @@ func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
}>

// CHECK-LABEL: func private @sparse_sorted_coo(
// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }>>)
func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)

// -----
@@ -94,7 +94,7 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
}>

// CHECK-LABEL: func private @sparse_bcsr(
// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)


@@ -105,7 +105,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
}>

// CHECK-LABEL: func private @sparse_ell(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>>
func.func private @sparse_ell(tensor<?x?xf64, #ELL>)

// -----
@@ -115,7 +115,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
}>

// CHECK-LABEL: func private @sparse_slice(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

// -----
@@ -125,7 +125,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
}>

// CHECK-LABEL: func private @sparse_slice(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

// -----
@@ -138,7 +138,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
}>

// CHECK-LABEL: func private @sparse_2_out_of_4(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : block2_4) }>>
func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)

// -----
@@ -153,7 +153,7 @@ func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
}>

// CHECK-LABEL: func private @BCSR(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
  return
}
@@ -174,7 +174,7 @@ func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
}>

// CHECK-LABEL: func private @BCSR_explicit(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
  return
}
@@ -190,7 +190,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
}>

// CHECK-LABEL: func private @NV_24(
// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 floordiv 4 : dense, d1 mod 4 : block2_4) }>>
func.func private @NV_24(%arg0: tensor<?x?xf64, #NV_24>) {
  return
}