Skip to content

Commit

Permalink
Update test cases: use an affine map (#ID_MAP) for the `nx` attribute of `sparse_tensor.sort_coo` instead of an integer.
Browse files Browse the repository at this point in the history
  • Loading branch information
Peiming Liu committed Sep 18, 2023
1 parent 6bbb3ba commit b21bc7e
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 198 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -110,14 +110,12 @@ static void forEachIJPairInXs(
Value cstep = constantIndex(builder, loc, xPerm.getNumResults() + ny);
Value iOffset = builder.create<arith::MulIOp>(loc, args[0], cstep);
Value jOffset = builder.create<arith::MulIOp>(loc, args[1], cstep);
for (AffineExpr e : xPerm.getResults()) {
unsigned k = e.cast<AffineDimExpr>().getPosition();
scf::IfOp ifOp;
Value i, j, buffer;
Value ck = constantIndex(builder, loc, k);
i = builder.create<arith::AddIOp>(loc, ck, iOffset);
j = builder.create<arith::AddIOp>(loc, ck, jOffset);
buffer = args[xStartIdx];
for (unsigned k = 0, e = xPerm.getNumResults(); k < e; k++) {
unsigned actualK = xPerm.getResult(k).cast<AffineDimExpr>().getPosition();
Value ak = constantIndex(builder, loc, actualK);
Value i = builder.create<arith::AddIOp>(loc, ak, iOffset);
Value j = builder.create<arith::AddIOp>(loc, ak, jOffset);
Value buffer = args[xStartIdx];

bodyBuilder(k, i, j, buffer);
}
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#ID_MAP = affine_map<(d0, d1, d2) -> (d0, d1, d2)>

module {
// Stores 5 values to the memref buffer.
func.func @storeValuesTo(%b: memref<?xi32>, %v0: i32, %v1: i32, %v2: i32,
Expand Down Expand Up @@ -109,7 +111,7 @@ module {
: (memref<?xi32, strided<[4], offset: ?>>, i32, i32, i32, i32, i32) -> ()
call @storeValuesTo(%y1, %c5, %c7, %c4, %c9, %c7)
: (memref<?xi32>, i32, i32, i32, i32, i32) -> ()
sparse_tensor.sort_coo quick_sort %i5, %xy jointly %y1 {nx = 3 : index, ny = 1 : index}
sparse_tensor.sort_coo quick_sort %i5, %xy jointly %y1 {nx = #ID_MAP, ny = 1 : index}
: memref<?xi32> jointly memref<?xi32>
%x0v = vector.transfer_read %x0[%i0], %c100: memref<?xi32, strided<[4], offset: ?>>, vector<5xi32>
vector.print %x0v : vector<5xi32>
Expand Down Expand Up @@ -137,7 +139,7 @@ module {
: (memref<?xi32, strided<[4], offset: ?>>, i32, i32, i32, i32, i32) -> ()
call @storeValuesTo(%y1, %c5, %c7, %c4, %c9, %c7)
: (memref<?xi32>, i32, i32, i32, i32, i32) -> ()
sparse_tensor.sort_coo insertion_sort_stable %i5, %xy jointly %y1 {nx = 3 : index, ny = 1 : index}
sparse_tensor.sort_coo insertion_sort_stable %i5, %xy jointly %y1 {nx = #ID_MAP, ny = 1 : index}
: memref<?xi32> jointly memref<?xi32>
%x0v2 = vector.transfer_read %x0[%i0], %c100: memref<?xi32, strided<[4], offset: ?>>, vector<5xi32>
vector.print %x0v2 : vector<5xi32>
Expand Down Expand Up @@ -165,7 +167,7 @@ module {
: (memref<?xi32, strided<[4], offset: ?>>, i32, i32, i32, i32, i32) -> ()
call @storeValuesTo(%y1, %c5, %c7, %c4, %c9, %c7)
: (memref<?xi32>, i32, i32, i32, i32, i32) -> ()
sparse_tensor.sort_coo heap_sort %i5, %xy jointly %y1 {nx = 3 : index, ny = 1 : index}
sparse_tensor.sort_coo heap_sort %i5, %xy jointly %y1 {nx = #ID_MAP, ny = 1 : index}
: memref<?xi32> jointly memref<?xi32>
%x0v3 = vector.transfer_read %x0[%i0], %c100: memref<?xi32, strided<[4], offset: ?>>, vector<5xi32>
vector.print %x0v3 : vector<5xi32>
Expand Down

0 comments on commit b21bc7e

Please sign in to comment.