Both dense and sparse tensor types are supported. The result of a
`bufferization.alloc_tensor` is a tensor value that can be used like any
other tensor value. In practice, it is often used as the "out" operand of
- another op. E.g.:
+ another op. Sparse tensor allocations should always be used in a local
+ construction operation and never escape the function boundary directly.
+
+ Example:
```mlir
%c = bufferization.alloc_tensor [%d1, %d2] : tensor<?x?xf32, #SparseMatrix>
%0 = linalg.matmul
ins(%a, %b: tensor<?x?xf32, #SparseMatrix>, tensor<?x?xf32, #SparseMatrix>)
outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
+ return %0 : tensor<?x?xf32, #SparseMatrix>
```
}];
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Matchers.h"
<< getType().getNumDynamicDims() << " dynamic sizes";
if (getCopy() && getCopy().getType() != getType())
return emitError("expected that `copy` and return type match");
+
+ // For sparse tensor allocation, we require that none of its
+ // uses escapes the function boundary directly.
+ if (sparse_tensor::getSparseTensorEncoding(getType())) {
+ for (auto &use : getOperation()->getUses())
+ if (isa<func::ReturnOp, func::CallOp, func::CallIndirectOp>(
+ use.getOwner()))
+ return emitError("sparse tensor allocation should not escape function");
+ }
+
return success();
}
MLIRDialect
MLIRFuncDialect
MLIRIR
+ MLIRSparseTensorDialect
MLIRTensorDialect
MLIRMemRefDialect
)
// expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
%0 = memref.cast %m0 {bufferization.escape = [true]} : memref<?xf32> to memref<10xf32>
return
-}
\ No newline at end of file
+}
+
+// -----
+
+#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+
+// Verifier test: returning the result of a sparse `alloc_tensor` straight
+// from the function is a direct escape and must be diagnosed.
+func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> {
+  // expected-error @+1{{sparse tensor allocation should not escape function}}
+  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
+  return %0 : tensor<20x40xf32, #DCSR>
+}
+
+// -----
+
+#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+
+// Verifier test: passing a sparse `alloc_tensor` result to a call site also
+// counts as escaping the function boundary and must be diagnosed.
+func.func private @foo(tensor<20x40xf32, #DCSR>) -> ()
+
+func.func @sparse_alloc_call() {
+  // expected-error @+1{{sparse tensor allocation should not escape function}}
+  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
+  call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
+  return
+}
+
// CHECK: return %[[T]] : !llvm.ptr<i8>
// Materialize the allocation through sparse_tensor.load before returning, so
// the `alloc_tensor` result itself does not escape the function (the verifier
// rejects direct escapes of sparse allocations).
func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
%0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #SparseMatrix>
- return %0 : tensor<?x?xf64, #SparseMatrix>
+ %1 = sparse_tensor.load %0 : tensor<?x?xf64, #SparseMatrix>
+ return %1 : tensor<?x?xf64, #SparseMatrix>
}
// CHECK-LABEL: func @sparse_release(
// Mixed case: the sparse allocation is materialized via sparse_tensor.load
// before returning (dense allocations may be returned directly — the
// no-escape rule applies only to sparse-encoded allocations).
func.func @sparse_and_dense_init(%arg0: index, %arg1: index)
-> (tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>) {
%0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #SparseMatrix>
- %1 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64>
- return %0, %1 : tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>
+ %1 = sparse_tensor.load %0 : tensor<?x?xf64, #SparseMatrix>
+ %2 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64>
+ return %1, %2 : tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>
}
":IR",
":InferTypeOpInterface",
":MemRefDialect",
+ ":SparseTensorDialect",
":Support",
":TensorDialect",
"//llvm:Support",