/// Tensor -> MemRef type converter.
/// Parameters: Value, memory space, bufferization options
using UnknownTypeConverterFn = std::function<BaseMemRefType(
- Value, unsigned, const BufferizationOptions &)>;
+ Value, Attribute memorySpace, const BufferizationOptions &)>;
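// Example (editor's sketch, mirroring the pass-configuration hunks further
// below): a custom converter now receives the memory space as an Attribute
// and can forward it unchanged:
//
//   BufferizationOptions options;
//   options.unknownTypeConverterFn =
//       [](Value value, Attribute memorySpace,
//          const BufferizationOptions &options) -> BaseMemRefType {
//     return getMemRefTypeWithStaticIdentityLayout(
//         value.getType().cast<TensorType>(), memorySpace);
//   };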
BufferizationOptions();
bool bufferizeFunctionBoundaries = false;
/// The default memory space that should be used when it cannot be inferred
- /// from the context. If no default memory space is specified, bufferization
- /// fails when the memory space cannot be inferred at any point.
- Optional<unsigned> defaultMemorySpace = 0;
+ /// from the context. In case of llvm::None, bufferization fails when the
+ /// memory space cannot be inferred at any point.
+ Optional<Attribute> defaultMemorySpace = Attribute();
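// Example (editor's sketch): the default can now be any attribute, e.g. an
// i64 integer space; assigning llvm::None makes bufferization fail whenever
// the space cannot be inferred. `ctx` is a hypothetical MLIRContext pointer.
//
//   BufferizationOptions options;
//   options.defaultMemorySpace =
//       IntegerAttr::get(IntegerType::get(ctx, 64), /*value=*/1);
//   // or: options.defaultMemorySpace = llvm::None;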
/// Certain ops have aliasing OpOperand/OpResult invariants (e.g., scf.for).
/// If this flag is set to `false`, those invariants are no longer enforced
/// canonicalizations are currently not implemented.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options,
MemRefLayoutAttrInterface layout = {},
- unsigned memorySpace = 0);
+ Attribute memorySpace = nullptr);
/// Return a MemRef type with fully dynamic layout. If the given tensor type
/// is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
- unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
+ Attribute memorySpace = nullptr);
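// For illustration (sketch), with an i64 memory space attribute of value 1:
//   tensor<4x?xf32> -> memref<4x?xf32, strided<[?, ?], offset: ?>, 1>
//   tensor<*xf32>   -> memref<*xf32, 1>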
/// Return a MemRef type with a static identity layout (i.e., no layout map). If
/// the given tensor type is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
- unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
+ Attribute memorySpace = nullptr);
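// For illustration (sketch), with an i64 memory space attribute of value 1:
//   tensor<4x?xf32> -> memref<4x?xf32, 1>   (identity layout)
//   tensor<*xf32>   -> memref<*xf32, 1>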
/// Return the owner of the given value. In case of a BlockArgument, that is
/// the owner of the block. In case of an OpResult, that is the defining op.
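// A minimal sketch of such a helper (name and body assumed here, not part of
// this patch):
//
//   Operation *getOwnerOfValue(Value value) {
//     if (auto opResult = value.dyn_cast<OpResult>())
//       return opResult.getDefiningOp();
//     return value.cast<BlockArgument>().getOwner()->getParentOp();
//   }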
let arguments = (ins Variadic<Index>:$dynamic_sizes,
Optional<AnyTensor>:$copy,
Optional<Index>:$size_hint,
- OptionalAttr<UI64Attr>:$memory_space);
+ OptionalAttr<AnyAttr>:$memory_space);
let results = (outs AnyTensor:$result);
// TODO: Implement memory space for this op. E.g., by adding a memory_space
// attribute to ConstantOp.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Only ranked tensors are supported.
return failure();
if (*trueType == *falseType)
return *trueType;
- if (trueType->getMemorySpaceAsInt() != falseType->getMemorySpaceAsInt())
+ if (trueType->getMemorySpace() != falseType->getMemorySpace())
return op->emitError("inconsistent memory space on true/false operands");
// If the buffers have different types, they differ only in their layout
return getMemRefTypeWithFullyDynamicLayout(
RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType()),
- memrefType.getMemorySpaceAsInt());
+ memrefType.getMemorySpace());
}
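// For illustration (sketch): if the true/false operands bufferize to
//   memref<5xf32, strided<[1]>, 1> and
//   memref<5xf32, strided<[1], offset: 2>, 1>,
// the common result type is promoted to
//   memref<5xf32, strided<[?], offset: ?>, 1>.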
BufferRelation bufferRelation(Operation *op, OpResult opResult,
FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
if (failed(copyBufferType))
return failure();
- allocTensorOp.setMemorySpaceAttr(
- b.getIntegerAttr(b.getIntegerType(64, /*isSigned=*/false),
- copyBufferType->getMemorySpaceAsInt()));
+ Attribute memorySpace = copyBufferType->getMemorySpace();
+ if (!memorySpace)
+ memorySpace = b.getI64IntegerAttr(0);
+ allocTensorOp.setMemorySpaceAttr(memorySpace);
return allocTensorOp.getResult();
}
/// Default unknown type converter: Use a fully dynamic layout map.
static BaseMemRefType
-defaultUnknownTypeConverter(Value value, unsigned memorySpace,
+defaultUnknownTypeConverter(Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithFullyDynamicLayout(value.getType().cast<TensorType>(),
memorySpace);
BaseMemRefType bufferization::getMemRefType(Value value,
const BufferizationOptions &options,
MemRefLayoutAttrInterface layout,
- unsigned memorySpace) {
+ Attribute memorySpace) {
auto tensorType = value.getType().cast<TensorType>();
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
assert(!layout && "UnrankedTensorType cannot have a layout map");
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
- memorySpaceAttr);
+ memorySpace);
}
// Case 2: Ranked memref type with specified layout.
if (layout) {
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), layout,
- memorySpaceAttr);
+ memorySpace);
}
return options.unknownTypeConverterFn(value, memorySpace, options);
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
- unsigned memorySpace) {
+ Attribute memorySpace) {
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
}
// Case 2: Ranked memref type.
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
auto rankedTensorType = tensorType.cast<RankedTensorType>();
int64_t dynamicOffset = ShapedType::kDynamic;
SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
dynamicOffset, dynamicStrides);
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), stridedLayout,
- memorySpaceAttr);
+ memorySpace);
}
/// Return a MemRef type with a static identity layout (i.e., no layout map). If
/// the given tensor type is unranked, return an unranked MemRef type.
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
- unsigned memorySpace) {
+ Attribute memorySpace) {
// Case 1: Unranked memref type.
if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
// Case 2: Ranked memref type.
auto rankedTensorType = tensorType.cast<RankedTensorType>();
- auto memorySpaceAttr = IntegerAttr::get(
- IntegerType::get(tensorType.getContext(), 64), memorySpace);
MemRefLayoutAttrInterface layout = {};
return MemRefType::get(rankedTensorType.getShape(),
rankedTensorType.getElementType(), layout,
- memorySpaceAttr);
+ memorySpace);
}
bool bufferization::detail::defaultIsRepetitiveRegion(
assert(value == getResult() && "invalid value");
// Compute memory space of this allocation.
- unsigned memorySpace;
+ Attribute memorySpace;
if (getMemorySpace().has_value()) {
memorySpace = *getMemorySpace();
} else if (getCopy()) {
bufferization::getBufferType(getCopy(), options, fixedTypes);
if (failed(copyBufferType))
return failure();
- memorySpace = copyBufferType->getMemorySpaceAsInt();
+ memorySpace = copyBufferType->getMemorySpace();
} else if (options.defaultMemorySpace.has_value()) {
memorySpace = *options.defaultMemorySpace;
} else {
// Configure type converter.
LayoutMapOption unknownTypeConversionOption =
parseLayoutMapOption(unknownTypeConversion);
- opt.unknownTypeConverterFn = [=](Value value, unsigned memorySpace,
+ opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
auto tensorType = value.getType().cast<TensorType>();
if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
options.allowUnknownOps = true;
options.createDeallocs = false;
options.enforceAliasingInvariants = false;
- options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+ options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithStaticIdentityLayout(
value.getType().cast<TensorType>(), memorySpace);
assert(rankedMemrefType && "buffer layout not supported on unranked tensors");
return MemRefType::get(
rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
- layoutAttr.getValue(), rankedMemrefType.getMemorySpaceAsInt());
+ layoutAttr.getValue(), rankedMemrefType.getMemorySpace());
}
/// Return the FuncOp called by `callOp`.
return thenBufferType;
// Memory space mismatch.
- if (thenBufferType.getMemorySpaceAsInt() !=
- elseBufferType.getMemorySpaceAsInt())
+ if (thenBufferType.getMemorySpace() != elseBufferType.getMemorySpace())
return op->emitError("inconsistent memory space on then/else branches");
// Layout maps are different: Promote to fully dynamic layout map.
return getMemRefTypeWithFullyDynamicLayout(
- opResult.getType().cast<TensorType>(),
- thenBufferType.getMemorySpaceAsInt());
+ opResult.getType().cast<TensorType>(), thenBufferType.getMemorySpace());
}
BufferRelation bufferRelation(Operation *op, OpResult opResult,
auto iterRanked = initArgBufferType->cast<MemRefType>();
assert(llvm::equal(yieldedRanked.getShape(), iterRanked.getShape()) &&
"expected same shape");
- assert(yieldedRanked.getMemorySpaceAsInt() ==
- iterRanked.getMemorySpaceAsInt() &&
+ assert(yieldedRanked.getMemorySpace() == iterRanked.getMemorySpace() &&
"expected same memory space");
#endif // NDEBUG
return getMemRefTypeWithFullyDynamicLayout(
iterArg.getType().cast<RankedTensorType>(),
- yieldedRanked.getMemorySpaceAsInt());
+ yieldedRanked.getMemorySpace());
}
/// Return `true` if the given loop may have 0 iterations.
// should be disallowed.
options.allowReturnAllocs = true;
options.functionBoundaryTypeConversion = LayoutMapOption::IdentityLayoutMap;
- options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+ options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
const BufferizationOptions &options) {
return getMemRefTypeWithStaticIdentityLayout(
value.getType().cast<TensorType>(), memorySpace);
layout = rankedMemRefType.getLayout();
// Compute the new memref type.
- Type resultMemRefType =
- getMemRefType(castOp.getResult(), options, layout,
- sourceMemRefType.getMemorySpaceAsInt());
+ Type resultMemRefType = getMemRefType(castOp.getResult(), options, layout,
+ sourceMemRefType.getMemorySpace());
// Replace the op with a memref.cast.
assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
// If dims cannot be collapsed, this op bufferizes to a new allocation.
RankedTensorType tensorResultType = collapseShapeOp.getResultType();
return bufferization::getMemRefTypeWithStaticIdentityLayout(
- tensorResultType, srcBufferType.getMemorySpaceAsInt());
+ tensorResultType, srcBufferType.getMemorySpace());
}
return memref::CollapseShapeOp::computeCollapsedType(
auto memrefType =
MemRefType::get(collapseShapeOp.getSrcType().getShape(),
collapseShapeOp.getSrcType().getElementType(),
- AffineMap(), bufferType.getMemorySpaceAsInt());
+ AffineMap(), bufferType.getMemorySpace());
buffer = rewriter.create<bufferization::ToMemrefOp>(
op->getLoc(), memrefType, *tensorAlloc);
}
fromElementsOp.getResult().cast<OpResult>(), options);
// TODO: Implement memory space for this op.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Allocate a buffer for the result.
generateOp.getResult().cast<OpResult>(), options);
// TODO: Implement memory space for this op.
- if (options.defaultMemorySpace != static_cast<unsigned>(0))
+ if (options.defaultMemorySpace != Attribute())
return op->emitError("memory space not implemented yet");
// Allocate memory.
return failure();
auto resultMemRefType = getMemRefType(
reshapeOp.getResult(), options, /*layout=*/{},
- srcBuffer->getType().cast<BaseMemRefType>().getMemorySpaceAsInt());
+ srcBuffer->getType().cast<BaseMemRefType>().getMemorySpace());
replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
rewriter, op, resultMemRefType, *srcBuffer, *shapeBuffer);
return success();
// CHECK-LABEL: func @alloc_tensor_with_memory_space()
func.func @alloc_tensor_with_memory_space() -> tensor<5xf32> {
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
- %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
// CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
// CHECK: memref.dealloc %[[alloc]]
// CHECK: return %[[r]]
{
// CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
// The second alloc_tensor should not have a copy operand.
- // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
// CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
- // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : i64} : tensor<5xf32>
%0 = bufferization.alloc_tensor() : tensor<5xf32>
%1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
return %0, %1 : tensor<5xf32>, tensor<5xf32>
func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
-> (tensor<5xf32>, tensor<5xf32>)
{
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
-> (tensor<3xf32>)
{
%0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
- // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<3xf32>
+ // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<3xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
return
}
-
-// -----
-
-func.func @alloc_tensor_invalid_memory_space_attr(%sz: index) {
- // expected-error @+1{{'bufferization.alloc_tensor' op attribute 'memory_space' failed to satisfy constraint: 64-bit unsigned integer attribute}}
- %0 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>
- return
-}
-
%c100 = arith.constant 100 : index
// CHECK: bufferization.alloc_tensor() size_hint=
%6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
+ // CHECK: bufferization.alloc_tensor(%{{.+}}) {memory_space = "foo"} : tensor<?xf32>
+ %7 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>
return %1 : tensor<?x5xf32>
}
{
%c0 = arith.constant 0 : index
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
- %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
// CHECK: scf.if %{{.*}} -> (memref<5xf32, 1>) {
%1 = scf.if %c -> tensor<5xf32> {
// CHECK: %[[cloned:.*]] = bufferization.clone %[[alloc]]
func.func @scf_execute_region_memory_space(%f: f32) -> f32 {
%c0 = arith.constant 0 : index
%0 = scf.execute_region -> tensor<5xf32> {
- %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+ %1 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
%2 = tensor.insert %f into %1[%c0] : tensor<5xf32>
scf.yield %2 : tensor<5xf32>
}
{
// CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
// CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
- %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
- %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
+ %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>
+ %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>
// CHECK: scf.for {{.*}} {
%r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
// CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<1024x1024xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<1024x1024xf64>
// CHECK: return %[[VAL_1]] : tensor<1024x1024xf64>
// CHECK: }
func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<32xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<32xf64>
// CHECK: return %[[VAL_1]] : tensor<32xf64>
// CHECK: }
func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false]} : tensor<8x8xf64>
-// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<8x8xf64>
+// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<8x8xf64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
// CHECK: %[[alloc_tensor:.*]] = memref.alloc{{.*}} : memref<?xf32, 3>
// CHECK: memref.copy %[[t]], %[[alloc_tensor]]
%0 = bufferization.alloc_tensor() copy(%t)
- {memory_space = 3 : ui64} : tensor<?xf32>
+ {memory_space = 3 : i64} : tensor<?xf32>
// CHECK: %[[padded_alloc:.*]] = memref.alloc() {{.*}} : memref<15xf32, 3>
// CHECK: linalg.map
// CHECK: outs(%[[padded_alloc]] : memref<15xf32, 3>)