If neither `copy` nor `memory_space` is specified, the default memory space
is used during bufferization.
+ The optional `size_hint` operand specifies the number of non-zero elements
+ for sparse tensors. The value of `size_hint` must be at least 1 and must
+ not exceed the linear size of the corresponding dense tensor type. If this
+ requirement is violated, the behavior of the operation is undefined.
+
Both dense and sparse tensor types are supported. The result of a
`bufferization.alloc_tensor` is a tensor value that can be used like any
other tensor value. In practice, it is often used as the "out" operand of
outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
return %0 : tensor<?x?xf32, #SparseMatrix>
```
+
+ ```mlir
+ %c = bufferization.alloc_tensor(%d1, %d2) size_hint = %noe
+ : tensor<?x?xf32, #SparseMatrix>
+ ```
}];
let arguments = (ins Variadic<Index>:$dynamic_sizes,
Optional<AnyTensor>:$copy,
+ Optional<Index>:$size_hint,
OptionalAttr<UI64Attr>:$memory_space);
let results = (outs AnyTensor:$result);
}];
let builders = [
- // Build an op without `copy` or `memory_space`.
+ // Build an op without `copy` or `memory_space` or `size_hint`.
OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,
- // Build an op without `memory_space`.
+ // Build an op without `memory_space` or `size_hint`.
OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
"Value":$copy)>,
+
+ // Build an op without `size_hint`.
+ OpBuilder<(ins "TensorType":$type, "ValueRange":$dynamicSizes,
+ "Value":$copy, "IntegerAttr":$memory_space)>,
];
let hasCanonicalizer = 1;
// Convenience builder: allocate a tensor of `type` with the given dynamic
// sizes and neither a `copy` source, a `size_hint`, nor a `memory_space`;
// delegates to the full builder with null placeholders for all three.
void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes) {
  build(builder, result, type, dynamicSizes, /*copy=*/Value(),
+        /*size_hint=*/Value(),
        /*memory_space=*/IntegerAttr());
}
// Convenience builder: allocate a tensor initialized from `copy`, with no
// `size_hint` operand and no `memory_space` attribute.
void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes,
                          Value copy) {
-  build(builder, result, type, dynamicSizes, copy,
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
        /*memory_space=*/IntegerAttr());
}
+// Convenience builder: allocate a tensor initialized from `copy` in the
+// given `memorySpace`, with no `size_hint` operand.
+// NOTE(review): this overload takes `TensorType` while the sibling overloads
+// take `RankedTensorType` — it matches the ODS builder declaration, but
+// confirm the wider type is intentional.
+void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
+                          TensorType type, ValueRange dynamicSizes, Value copy,
+                          IntegerAttr memorySpace) {
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
+        memorySpace);
+}
+
namespace {
/// Change the type of the result of a `bufferization.alloc_tensor` by making
/// the result type statically sized along dimension that in the original
if (parser.parseLParen() || parser.parseOperand(copyOperand) ||
parser.parseRParen())
return failure();
+ ParseResult sizeHintKeyword = parser.parseOptionalKeyword("size_hint");
+ OpAsmParser::UnresolvedOperand sizeHintOperand;
+ if (sizeHintKeyword.succeeded())
+ if (parser.parseEqual() || parser.parseOperand(sizeHintOperand))
+ return failure();
if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon())
return failure();
if (copyKeyword.succeeded())
if (parser.resolveOperand(copyOperand, type, result.operands))
return failure();
+ if (sizeHintKeyword.succeeded())
+ if (parser.resolveOperand(sizeHintOperand, indexType, result.operands))
+ return failure();
result.addAttribute(AllocTensorOp::getOperandSegmentSizeAttr(),
parser.getBuilder().getDenseI32ArrayAttr(
{static_cast<int32_t>(dynamicSizesOperands.size()),
- static_cast<int32_t>(copyKeyword.succeeded())}));
+ static_cast<int32_t>(copyKeyword.succeeded()),
+ static_cast<int32_t>(sizeHintKeyword.succeeded())}));
return success();
}
p << "(" << getDynamicSizes() << ")";
if (getCopy())
p << " copy(" << getCopy() << ")";
+ if (getSizeHint())
+ p << " size_hint=" << getSizeHint();
p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
AllocTensorOp::getOperandSegmentSizeAttr()});
p << " : ";
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
+#CSR = #sparse_tensor.encoding<{
+ dimLevelType = ["dense", "compressed"]
+}>
+
// CHECK-LABEL: func @test_clone
func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> {
%clone = bufferization.clone %buf : memref<*xf32> to memref<*xf32>
%4 = bufferization.alloc_tensor() copy(%t) {escape = true} : tensor<?x5xf32>
// CHECK: bufferization.alloc_tensor() copy(%{{.*}}) {escape = false} : tensor<?x5xf32>
%5 = bufferization.alloc_tensor() copy(%t) {escape = false} : tensor<?x5xf32>
+ %c100 = arith.constant 100 : index
+ // CHECK: bufferization.alloc_tensor() size_hint=
+ %6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
return %1 : tensor<?x5xf32>
}