From 057e33ef36d0230a1bf62763cc6381769266e641 Mon Sep 17 00:00:00 2001 From: Peiming Liu Date: Mon, 27 Jun 2022 10:17:58 -0700 Subject: [PATCH] [mlir][sparse]Add more integration tests for sparse_tensor.unary Previously, the sparse_tensor.unary integration test does not contain cases with the use of `linalg.index` (previously unsupported), this commit adds test cases that use `linalg.index` operators. Reviewed By: aartbik Differential Revision: https://reviews.llvm.org/D128460 --- .../Dialect/SparseTensor/CPU/sparse_unary.mlir | 80 ++++++++++++++++++++-- 1 file changed, 75 insertions(+), 5 deletions(-) diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir index 1a8e8d9..e28f5f3 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir @@ -71,6 +71,30 @@ module { return %0 : tensor } + // Performs B[i] = i * A[i]. + func.func @vector_magnify(%arga: tensor) -> tensor { + %c = arith.constant 0 : index + %d = tensor.dim %arga, %c : tensor + %xv = bufferization.alloc_tensor(%d) : tensor + %0 = linalg.generic #trait_vec_scale + ins(%arga: tensor) + outs(%xv: tensor) { + ^bb(%a: f64, %x: f64): + %idx = linalg.index 0 : index + %1 = sparse_tensor.unary %a : f64 to f64 + present={ + ^bb0(%x0: f64): + %tmp = arith.index_cast %idx : index to i64 + %idxf = arith.uitofp %tmp : i64 to f64 + %ret = arith.mulf %x0, %idxf : f64 + sparse_tensor.yield %ret : f64 + } + absent={} + linalg.yield %1 : f64 + } -> tensor + return %0 : tensor + } + // Clips values to the range [3, 7]. func.func @matrix_clip(%argx: tensor) -> tensor { %c0 = arith.constant 0 : index @@ -99,6 +123,40 @@ module { return %0 : tensor } + // Slices matrix and only keeps the value of the lower-right corner of the original + // matrix (i.e., A[2/d0 ..][2/d1 ..]), and sets other values to 99. 
+ func.func @matrix_slice(%argx: tensor) -> tensor { + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %d0 = tensor.dim %argx, %c0 : tensor + %d1 = tensor.dim %argx, %c1 : tensor + %xv = bufferization.alloc_tensor(%d0, %d1) : tensor + %0 = linalg.generic #trait_mat_scale + ins(%argx: tensor) + outs(%xv: tensor) { + ^bb(%a: f64, %x: f64): + %row = linalg.index 0 : index + %col = linalg.index 1 : index + %1 = sparse_tensor.unary %a: f64 to f64 + present={ + ^bb0(%x0: f64): + %v = arith.constant 99.0 : f64 + %two = arith.constant 2 : index + %r = arith.muli %two, %row : index + %c = arith.muli %two, %col : index + %cmp1 = arith.cmpi "ult", %r, %d0 : index + %tmp = arith.select %cmp1, %v, %x0 : f64 + %cmp2 = arith.cmpi "ult", %c, %d1 : index + %result = arith.select %cmp2, %v, %tmp : f64 + sparse_tensor.yield %result : f64 + } + absent={} + linalg.yield %1 : f64 + } -> tensor + return %0 : tensor + } + + // Dumps a sparse vector of type f64. func.func @dump_vec_f64(%arg0: tensor) { // Dump the values array to verify only sparse contents are stored. @@ -171,10 +229,14 @@ module { : (tensor) -> tensor %1 = call @vector_negation(%sv1) : (tensor) -> tensor - + %2 = call @vector_magnify(%sv1) + : (tensor) -> tensor + // Call sparse matrix kernels. 
- %2 = call @matrix_clip(%sm1) + %3 = call @matrix_clip(%sm1) + : (tensor) -> tensor + %4 = call @matrix_slice(%sm1) : (tensor) -> tensor // @@ -186,20 +248,28 @@ module { // CHECK-NEXT: ( 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 ) // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 ) // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 ) + // CHECK-NEXT: ( 0, 6, 33, 68, 100, 126, 196, 232, 279, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ) + // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 0, 0, 68, 0, 0, 100, 126, 0, 0, 0, 0, 0, 0, 196, 232, 0, 279 ) // CHECK-NEXT: ( 3, 3, 3, 4, 5, 6, 7, 7, 7, -1, -1, -1, -1, -1, -1, -1 ) // CHECK-NEXT: ( ( 3, 3, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 3 ), ( 0, 0, 4, 0, 5, 0, 0, 6 ), ( 7, 0, 7, 7, 0, 0, 0, 0 ) ) + // CHECK-NEXT: ( 99, 99, 99, 99, 5, 6, 99, 99, 99, -1, -1, -1, -1, -1, -1, -1 ) + // CHECK-NEXT: ( ( 99, 99, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 99 ), ( 0, 0, 99, 0, 5, 0, 0, 6 ), ( 99, 0, 99, 99, 0, 0, 0, 0 ) ) // call @dump_vec_f64(%sv1) : (tensor) -> () call @dump_vec_i32(%0) : (tensor) -> () call @dump_vec_f64(%1) : (tensor) -> () - call @dump_mat(%2) : (tensor) -> () - + call @dump_vec_f64(%2) : (tensor) -> () + call @dump_mat(%3) : (tensor) -> () + call @dump_mat(%4) : (tensor) -> () + // Release the resources. sparse_tensor.release %sv1 : tensor sparse_tensor.release %sm1 : tensor sparse_tensor.release %0 : tensor sparse_tensor.release %1 : tensor - sparse_tensor.release %2 : tensor + sparse_tensor.release %2 : tensor + sparse_tensor.release %3 : tensor + sparse_tensor.release %4 : tensor return } } -- 2.7.4