From 16b8f4ddae1cb36ac16c6eb451613c032e4064f6 Mon Sep 17 00:00:00 2001 From: Aart Bik Date: Mon, 4 Oct 2021 13:13:24 -0700 Subject: [PATCH] [mlir][sparse] add a "release" operation to sparse tensor dialect We have several ways to materialize sparse tensors (new and convert) but no explicit operation to release the underlying sparse storage scheme at runtime (other than making an explicit delSparseTensor() library call). To simplify memory management, a sparse_tensor.release operation has been introduced that lowers to the runtime library call while keeping tensors, opaque pointers, and memrefs transparent in the initial IR. *Note* There is obviously some tension between the concept of immutable tensors and memory management methods. This tension is addressed by simply stating that after the "release" call, no further memref related operations are allowed on the tensor value. We expect the design to evolve over time, however, and arrive at a more satisfactory view of tensors and buffers eventually. 
Bug: http://llvm.org/pr52046 Reviewed By: bixia Differential Revision: https://reviews.llvm.org/D111099 --- .../Dialect/SparseTensor/IR/SparseTensorOps.td | 19 ++++++++++ .../SparseTensor/IR/SparseTensorDialect.cpp | 6 +++ .../Transforms/SparseTensorConversion.cpp | 32 ++++++++++++---- mlir/test/Dialect/SparseTensor/conversion.mlir | 9 +++++ mlir/test/Dialect/SparseTensor/invalid.mlir | 8 ++++ mlir/test/Dialect/SparseTensor/roundtrip.mlir | 13 +++++++ .../Dialect/SparseTensor/CPU/dense_output.mlir | 4 ++ .../Dialect/SparseTensor/CPU/lit.local.cfg | 3 -- .../Dialect/SparseTensor/CPU/sparse_cast.mlir | 16 ++++++++ ....mlir => sparse_constant_to_sparse_tensor.mlir} | 3 ++ .../SparseTensor/CPU/sparse_conversion.mlir | 12 +++++- .../SparseTensor/CPU/sparse_filter_conv2d.mlir | 4 ++ .../Dialect/SparseTensor/CPU/sparse_flatten.mlir | 4 +- .../Dialect/SparseTensor/CPU/sparse_matvec.mlir | 4 +- .../Dialect/SparseTensor/CPU/sparse_mttkrp.mlir | 4 +- .../SparseTensor/CPU/sparse_out_simple.mlir | 3 ++ .../SparseTensor/CPU/sparse_quantized_matmul.mlir | 4 ++ .../SparseTensor/CPU/sparse_reductions.mlir | 44 +++++++++++++++------- .../SparseTensor/CPU/sparse_sampled_matmul.mlir | 1 + .../SparseTensor/CPU/sparse_sampled_mm_fusion.mlir | 9 +++++ .../Dialect/SparseTensor/CPU/sparse_scale.mlir | 3 ++ .../Dialect/SparseTensor/CPU/sparse_spmm.mlir | 3 +- .../Dialect/SparseTensor/CPU/sparse_storage.mlir | 9 +++++ .../Dialect/SparseTensor/CPU/sparse_sum.mlir | 3 +- 24 files changed, 191 insertions(+), 29 deletions(-) rename mlir/test/Integration/Dialect/SparseTensor/CPU/{sparse-constant_to_sparse_tensor.mlir => sparse_constant_to_sparse_tensor.mlir} (95%) mode change 100644 => 100755 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td index c6f5c97..3950c12 100644 --- 
a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -85,6 +85,25 @@ def SparseTensor_ConvertOp : SparseTensor_Op<"convert", let hasFolder = 1; } +def SparseTensor_ReleaseOp : SparseTensor_Op<"release", []>, + Arguments<(ins AnyTensor:$tensor)> { + string description = [{ + Releases the underlying sparse storage scheme for a tensor that + materialized earlier through a `new` operator or a non-trivial + `convert` operator with an annotated tensor type as destination. + This operation should only be called once for any materialized tensor. + Also, after this operation, any subsequent `memref` querying operation + on the tensor returns undefined results. + + Example: + + ```mlir + sparse_tensor.release %tensor : tensor<1024x1024xf64, #CSR> + ``` + }]; + let assemblyFormat = "$tensor attr-dict `:` type($tensor)"; +} + def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>, Arguments<(ins AnyTensor:$tensor, Index:$dim)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp index 4e63f57..8df79ed 100644 --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -235,6 +235,12 @@ OpFoldResult ConvertOp::fold(ArrayRef operands) { return {}; } +static LogicalResult verify(ReleaseOp op) { + if (!getSparseTensorEncoding(op.tensor().getType())) + return op.emitError("expected a sparse tensor to release"); + return success(); +} + static LogicalResult verify(ToPointersOp op) { if (auto e = getSparseTensorEncoding(op.tensor().getType())) { if (failed(isInBounds(op.dim(), op.tensor()))) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp index 11f589c..d60a13f 100644 --- 
a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -99,8 +99,8 @@ static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width, /// the "_emit_c_interface" on the function declaration when requested, /// so that LLVM lowering generates a wrapper function that takes care /// of ABI complications with passing in and returning MemRefs to C functions. -static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type resultType, - ValueRange operands, +static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, + TypeRange resultType, ValueRange operands, bool emitCInterface = false) { MLIRContext *context = op->getContext(); auto module = op->getParentOfType(); @@ -471,6 +471,23 @@ class SparseTensorConvertConverter : public OpConversionPattern { } }; +/// Sparse conversion rule for the release operator. +class SparseTensorReleaseConverter : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + LogicalResult + matchAndRewrite(ReleaseOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + StringRef name = "delSparseTensor"; + TypeRange none; + rewriter.create(op.getLoc(), none, + getFunc(op, name, none, adaptor.getOperands()), + adaptor.getOperands()); + rewriter.eraseOp(op); + return success(); + } +}; + /// Sparse conversion rule for pointer accesses. 
class SparseTensorToPointersConverter : public OpConversionPattern { @@ -483,7 +500,7 @@ public: Type eltType = resType.cast().getElementType(); StringRef name; if (eltType.isIndex()) - name = "sparsePointers"; + name = "sparsePointers"; // 64-bit, but its own name for unique signature else if (eltType.isInteger(64)) name = "sparsePointers64"; else if (eltType.isInteger(32)) @@ -514,7 +531,7 @@ public: Type eltType = resType.cast().getElementType(); StringRef name; if (eltType.isIndex()) - name = "sparseIndices"; + name = "sparseIndices"; // 64-bit, but its own name for unique signature else if (eltType.isInteger(64)) name = "sparseIndices64"; else if (eltType.isInteger(32)) @@ -609,7 +626,8 @@ void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter, RewritePatternSet &patterns) { patterns.add( - typeConverter, patterns.getContext()); + SparseTensorReleaseConverter, SparseTensorToPointersConverter, + SparseTensorToIndicesConverter, SparseTensorToValuesConverter, + SparseTensorToTensorConverter>(typeConverter, + patterns.getContext()); } diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir index f2831f8..48d8a7e 100644 --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -112,6 +112,15 @@ func @sparse_new3d(%arg0: !llvm.ptr) -> tensor { return %0 : tensor } +// CHECK-LABEL: func @sparse_release( +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) +// CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr) -> () +// CHECK: return +func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) { + sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector> + return +} + // CHECK-LABEL: func @sparse_nop_convert( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> !llvm.ptr // CHECK: return %[[A]] : !llvm.ptr diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir index d2f8eee..18b90e8 100644 --- 
a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -8,6 +8,14 @@ func @invalid_new_dense(%arg0: !llvm.ptr) -> tensor<32xf32> { // ----- +func @invalid_release_dense(%arg0: tensor<4xi32>) { + // expected-error@+1 {{expected a sparse tensor to release}} + sparse_tensor.release %arg0 : tensor<4xi32> + return +} + +// ----- + func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref { %c = constant 0 : index // expected-error@+1 {{expected a sparse tensor to get pointers}} diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir index dcb2b7f..770e903 100644 --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -15,6 +15,19 @@ func @sparse_new(%arg0: !llvm.ptr) -> tensor<128xf64, #SparseVector> { #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +// CHECK-LABEL: func @sparse_release( +// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}> +// CHECK: sparse_tensor.release %[[A]] : tensor<128xf64, #{{.*}}> +// CHECK: return +func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) { + sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector> + return +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + // CHECK-LABEL: func @sparse_convert_1d_to_sparse( // CHECK-SAME: %[[A:.*]]: tensor<64xf32>) // CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<64xf32> to tensor<64xf32, #{{.*}}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir index 76b9852..c0e624b 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -95,6 +95,10 @@ module { %v = vector.load %m[%c0] : memref, vector<25xf64> vector.print %v : vector<25xf64> + // Release the resources. 
+ sparse_tensor.release %a : tensor + sparse_tensor.release %x : tensor + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg index 7ac5190..83247d7 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg @@ -3,6 +3,3 @@ import sys # No JIT on win32. if sys.platform == 'win32': config.unsupported = True - -# http://llvm.org/pr52046 -config.environment['ASAN_OPTIONS'] = 'detect_leaks=0' diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir index 5fcc27e..2a75fd0 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir @@ -273,6 +273,22 @@ module { %v9 = vector.transfer_read %m9[%z], %i: memref<10xi32>, vector<10xi32> vector.print %v9 : vector<10xi32> + // Release the resources. 
+ sparse_tensor.release %1 : tensor<10xi32, #SV> + sparse_tensor.release %3 : tensor<10xf32, #SV> + sparse_tensor.release %5 : tensor<10xf64, #SV> + sparse_tensor.release %7 : tensor<10xf64, #SV> + memref.dealloc %m0 : memref<10xf32> + memref.dealloc %m1 : memref<10xf32> + memref.dealloc %m2 : memref<10xi32> + memref.dealloc %m3 : memref<10xi32> + memref.dealloc %m4 : memref<10xf64> + memref.dealloc %m5 : memref<10xf32> + memref.dealloc %m6 : memref<10xi64> + memref.dealloc %m7 : memref<10xi64> + memref.dealloc %m8 : memref<10xi8> + memref.dealloc %m9 : memref<10xi32> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir similarity index 95% rename from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir index 9154b40..e0141bc 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -45,6 +45,9 @@ module { %vr = vector.transfer_read %v[%c0], %d0: memref, vector<8xf64> vector.print %vr : vector<8xf64> + // Release the resources. + sparse_tensor.release %ts : tensor<10x8xf64, #Tensor1> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir index a6a9782..4e0aaf1 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -245,7 +245,17 @@ module { call @dumpf64(%v2) : (memref) -> () call @dumpf64(%v3) : (memref) -> () + // Release the resources. 
+ sparse_tensor.release %1 : tensor<2x3x4xf64, #Tensor1> + sparse_tensor.release %2 : tensor<2x3x4xf64, #Tensor2> + sparse_tensor.release %3 : tensor<2x3x4xf64, #Tensor3> + sparse_tensor.release %b : tensor<2x3x4xf64, #Tensor1> + sparse_tensor.release %c : tensor<2x3x4xf64, #Tensor1> + sparse_tensor.release %d : tensor<2x3x4xf64, #Tensor2> + sparse_tensor.release %f : tensor<2x3x4xf64, #Tensor2> + sparse_tensor.release %g : tensor<2x3x4xf64, #Tensor3> + sparse_tensor.release %h : tensor<2x3x4xf64, #Tensor3> + return } } - diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir index 63627db..4135af2 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -84,6 +84,10 @@ module { : memref<6x6xi32>, vector<6x6xi32> vector.print %v : vector<6x6xi32> + // Release the resources. + sparse_tensor.release %sparse_filter : tensor<3x3xi32, #DCSR> + memref.dealloc %m : memref<6x6xi32> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir index 6ef8adb..957c210 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -41,7 +41,8 @@ module { // A kernel that flattens a rank 8 tensor into a dense matrix. // func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>, - %argx: tensor<7x3xf64>) -> tensor<7x3xf64> { + %argx: tensor<7x3xf64> {linalg.inplaceable = true}) + -> tensor<7x3xf64> { %0 = linalg.generic #trait_flatten ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>) outs(%argx: tensor<7x3xf64>) { @@ -99,6 +100,7 @@ module { // Release the resources. 
memref.dealloc %xdata : memref<7x3xf64> + sparse_tensor.release %a : tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor> return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir index 0f05d0b..a378ba1 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -54,7 +54,8 @@ module { // func @kernel_matvec(%arga: tensor, %argb: tensor, - %argx: tensor) -> tensor { + %argx: tensor {linalg.inplaceable = true}) + -> tensor { %0 = linalg.generic #matvec ins(%arga, %argb: tensor, tensor) outs(%argx: tensor) { @@ -111,6 +112,7 @@ module { // Release the resources. memref.dealloc %bdata : memref memref.dealloc %xdata : memref + sparse_tensor.release %a : tensor return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir index e061012..4b7721c 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir @@ -40,7 +40,8 @@ module { func @kernel_mttkrp(%argb: tensor, %argc: tensor, %argd: tensor, - %arga: tensor) -> tensor { + %arga: tensor {linalg.inplaceable = true}) + -> tensor { %0 = linalg.generic #mttkrp ins(%argb, %argc, %argd: tensor, tensor, tensor) @@ -126,6 +127,7 @@ module { memref.dealloc %adata : memref memref.dealloc %cdata : memref memref.dealloc %ddata : memref + sparse_tensor.release %b : tensor return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir index 8c36275..7d2a406 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -72,6 +72,9 @@ module { %v = vector.transfer_read %m[%c0], 
%d0: memref, vector<9xf64> vector.print %v : vector<9xf64> + // Release the resources. + sparse_tensor.release %x : tensor + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir index ffc07ae..62a5e58 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -71,6 +71,10 @@ module { : memref<5x6xi32>, vector<5x6xi32> vector.print %v : vector<5x6xi32> + // Release the resources. + sparse_tensor.release %sparse_input2 : tensor<3x6xi8, #DCSR> + memref.dealloc %m : memref<5x6xi32> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir index d2d0467..867e17f 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir @@ -125,16 +125,14 @@ module { return %0 : tensor } - func @dump_i32(%arg0 : tensor) { - %m = memref.buffer_cast %arg0 : memref - %v = memref.load %m[] : memref + func @dump_i32(%arg0 : memref) { + %v = memref.load %arg0[] : memref vector.print %v : i32 return } - func @dump_f32(%arg0 : tensor) { - %m = memref.buffer_cast %arg0 : memref - %v = memref.load %m[] : memref + func @dump_f32(%arg0 : memref) { + %v = memref.load %arg0[] : memref vector.print %v : f32 return } @@ -203,13 +201,33 @@ module { // CHECK: 15 // CHECK: 10 // - call @dump_i32(%0) : (tensor) -> () - call @dump_f32(%1) : (tensor) -> () - call @dump_i32(%2) : (tensor) -> () - call @dump_f32(%3) : (tensor) -> () - call @dump_i32(%4) : (tensor) -> () - call @dump_i32(%5) : (tensor) -> () - call @dump_i32(%6) : (tensor) -> () + %m0 = memref.buffer_cast %0 : memref + call @dump_i32(%m0) : (memref) -> () + %m1 = memref.buffer_cast %1 : memref + call 
@dump_f32(%m1) : (memref) -> () + %m2 = memref.buffer_cast %2 : memref + call @dump_i32(%m2) : (memref) -> () + %m3 = memref.buffer_cast %3 : memref + call @dump_f32(%m3) : (memref) -> () + %m4 = memref.buffer_cast %4 : memref + call @dump_i32(%m4) : (memref) -> () + %m5 = memref.buffer_cast %5 : memref + call @dump_i32(%m5) : (memref) -> () + %m6 = memref.buffer_cast %6 : memref + call @dump_i32(%m6) : (memref) -> () + + // Release the resources. + sparse_tensor.release %sparse_input_i32 : tensor<32xi32, #SV> + sparse_tensor.release %sparse_input_f32 : tensor<32xf32, #SV> + sparse_tensor.release %dense_input_i32 : tensor<32xi32, #DV> + sparse_tensor.release %dense_input_f32 : tensor<32xf32, #DV> + memref.dealloc %m0 : memref + memref.dealloc %m1 : memref + memref.dealloc %m2 : memref + memref.dealloc %m3 : memref + memref.dealloc %m4 : memref + memref.dealloc %m5 : memref + memref.dealloc %m6 : memref return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir index 2b6e57c..569f762 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -128,6 +128,7 @@ module { memref.dealloc %adata : memref memref.dealloc %bdata : memref memref.dealloc %xdata : memref + sparse_tensor.release %s : tensor return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir old mode 100644 new mode 100755 index 63f8215..3a4b322 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir @@ -28,6 +28,10 @@ // RUN: -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck 
%s +// +// Interop between linalg/sparse leaves some issues to be resolved: +// UNSUPPORTED: asan + #SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> @@ -163,6 +167,11 @@ module { vector.print %v0 : vector<8x8xf64> vector.print %v1 : vector<8x8xf64> + // Release the resources. + sparse_tensor.release %s : tensor<8x8xf64, #SM> + memref.dealloc %m0 : memref<8x8xf64> + memref.dealloc %m1 : memref<8x8xf64> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir index 7db8176..3b1f40a 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir @@ -74,6 +74,9 @@ module { %v = vector.transfer_read %m[%c0], %f0: memref, vector<16xf32> vector.print %v : vector<16xf32> + // Release the resources. + sparse_tensor.release %1 : tensor<8x8xf32, #CSR> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir index 4c977e1..5085bd2 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir @@ -38,7 +38,7 @@ module { // func @kernel_spmm(%arga: tensor, %argb: tensor, - %argx: tensor) -> tensor { + %argx: tensor {linalg.inplaceable = true}) -> tensor { %0 = linalg.generic #spmm ins(%arga, %argb: tensor, tensor) outs(%argx: tensor) { @@ -101,6 +101,7 @@ module { // Release the resources. 
memref.dealloc %bdata : memref memref.dealloc %xdata : memref + sparse_tensor.release %a : tensor return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir index cbac014..09465ac 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -250,6 +250,15 @@ module { %50 = vector.transfer_read %49[%c0], %d0: memref, vector<70xf64> vector.print %50 : vector<70xf64> + // Release the resources. + sparse_tensor.release %0 : tensor<10x8xf64, #Dense> + sparse_tensor.release %1 : tensor<10x8xf64, #CSR> + sparse_tensor.release %2 : tensor<10x8xf64, #DCSR> + sparse_tensor.release %3 : tensor<10x8xf64, #CSC> + sparse_tensor.release %4 : tensor<10x8xf64, #DCSC> + sparse_tensor.release %x : tensor<10x8xf64, #BlockRow> + sparse_tensor.release %y : tensor<10x8xf64, #BlockCol> + return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir index 6a9ee01..b6fcaee 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -35,7 +35,7 @@ module { // A kernel that sum-reduces a matrix to a single scalar. // func @kernel_sum_reduce(%arga: tensor, - %argx: tensor) -> tensor { + %argx: tensor {linalg.inplaceable = true}) -> tensor { %0 = linalg.generic #trait_sum_reduce ins(%arga: tensor) outs(%argx: tensor) { @@ -79,6 +79,7 @@ module { // Release the resources. memref.dealloc %xdata : memref + sparse_tensor.release %a : tensor return } -- 2.7.4