From 1366467a3ba9c489bbabe27f89cf6af404601149 Mon Sep 17 00:00:00 2001
From: Uday Bondhugula
Date: Sat, 14 Sep 2019 13:21:00 -0700
Subject: [PATCH] update normalizeMemRef utility; handle missing failure check
 + add more tests

- take care of symbolic operands with alloc
- add missing check for compose map failure and a test case
- add test cases on strides
- drop incorrect check for one-to-one'ness

Signed-off-by: Uday Bondhugula

Closes tensorflow/mlir#132

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/mlir/pull/132 from bondhugula:normalize-memrefs 8aebf285fb0d7c19269d85255aed644657e327b7
PiperOrigin-RevId: 269105947
---
 mlir/include/mlir/Dialect/StandardOps/Ops.td |  6 ++++
 mlir/lib/Transforms/Utils/Utils.cpp          | 19 ++++++-------
 mlir/test/Transforms/memref-normalize.mlir   | 42 ++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 10 deletions(-)

diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index 629bddf..426ec65 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -156,6 +156,12 @@ def AllocOp : Std_Op<"alloc"> {
 
   let extraClassDeclaration = [{
     MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
+
+    /// Returns the number of symbolic operands (the ones in square brackets),
+    /// which bind to the symbols of the memref's layout map.
+    unsigned getNumSymbolicOperands() {
+      return getNumOperands() - getType().getNumDynamicDims();
+    }
   }];
 
   let hasCanonicalizer = 1;
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 5f1bf93..e57d40e 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -389,7 +389,7 @@ void mlir::createAffineComputationSlice(
   }
 }
 
-// TODO: Currently works for static memrefs with single non-identity layout map.
+// TODO: Currently works for static memrefs with a single layout map.
 LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
   MemRefType memrefType = allocOp.getType();
   unsigned rank = memrefType.getRank();
@@ -403,16 +403,12 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
 
   AffineMap layoutMap = layoutMaps.front();
 
+  // Nothing to do for identity layout maps.
   if (layoutMap == b.getMultiDimIdentityMap(rank))
     return success();
 
-  if (layoutMap.getNumResults() < rank)
-    // This is a sufficient condition for not being one-to-one; the map is thus
-    // invalid. Leave it alone. (Undefined behavior?)
-    return failure();
-
-  // We don't do any more non-trivial checks for one-to-one'ness; we
-  // assume that it is one-to-one.
+  // We don't do any checks for one-to-one'ness; we assume that it is
+  // one-to-one.
 
   // TODO: Only for static memref's for now.
   if (memrefType.getNumDynamicDims() > 0)
@@ -421,7 +417,7 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
   // We have a single map that is not an identity map. Create a new memref with
   // the right shape and an identity layout map.
   auto shape = memrefType.getShape();
-  FlatAffineConstraints fac(rank, 0);
+  FlatAffineConstraints fac(rank, allocOp.getNumSymbolicOperands());
   for (unsigned d = 0; d < rank; ++d) {
     fac.addConstantLowerBound(d, 0);
     fac.addConstantUpperBound(d, shape[d] - 1);
@@ -430,7 +426,10 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
   // We compose this map with the original index (logical) space to derive the
   // upper bounds for the new index space.
   unsigned newRank = layoutMap.getNumResults();
-  fac.composeMatchingMap(layoutMap);
+  if (failed(fac.composeMatchingMap(layoutMap)))
+    // TODO: semi-affine maps.
+    return failure();
+
   // Project out the old data dimensions.
   fac.projectOut(newRank, fac.getNumIds() - newRank - fac.getNumLocalIds());
   SmallVector<int64_t, 4> newShape(newRank);
diff --git a/mlir/test/Transforms/memref-normalize.mlir b/mlir/test/Transforms/memref-normalize.mlir
index 319c51b..c4973e8 100644
--- a/mlir/test/Transforms/memref-normalize.mlir
+++ b/mlir/test/Transforms/memref-normalize.mlir
@@ -66,6 +66,35 @@ func @data_tiling() {
   return
 }
 
+// Strides 2 and 4 along respective dimensions.
+// CHECK-LABEL: func @strided
+func @strided() {
+  %A = alloc() : memref<64x128xf32, (d0, d1) -> (2*d0, 4*d1)>
+  // CHECK: affine.for %[[IV0:.*]] =
+  affine.for %i = 0 to 64 {
+    // CHECK: affine.for %[[IV1:.*]] =
+    affine.for %j = 0 to 128 {
+      // CHECK: affine.load %{{.*}}[%[[IV0]] * 2, %[[IV1]] * 4] : memref<127x509xf32>
+      affine.load %A[%i, %j] : memref<64x128xf32, (d0, d1) -> (2*d0, 4*d1)>
+    }
+  }
+  return
+}
+
+// Strided, but the strides are in the linearized space.
+// CHECK-LABEL: func @strided_cumulative
+func @strided_cumulative() {
+  %A = alloc() : memref<2x5xf32, (d0, d1) -> (3*d0 + 17*d1)>
+  // CHECK: affine.for %[[IV0:.*]] =
+  affine.for %i = 0 to 2 {
+    // CHECK: affine.for %[[IV1:.*]] =
+    affine.for %j = 0 to 5 {
+      // CHECK: affine.load %{{.*}}[%[[IV0]] * 3 + %[[IV1]] * 17] : memref<72xf32>
+      affine.load %A[%i, %j] : memref<2x5xf32, (d0, d1) -> (3*d0 + 17*d1)>
+    }
+  }
+  return
+}
+
 // Memref escapes; no normalization.
 // CHECK-LABEL: func @escaping() -> memref<64xf32, #map{{[0-9]+}}>
@@ -74,3 +103,16 @@ func @escaping() -> memref<64xf32, (d0) -> (d0 + 2)> {
   %A = alloc() : memref<64xf32, (d0) -> (d0 + 2)>
   return %A : memref<64xf32, (d0) -> (d0 + 2)>
 }
+
+// Semi-affine maps, normalization not implemented yet.
+// CHECK-LABEL: func @semi_affine_layout_map
+func @semi_affine_layout_map(%s0: index, %s1: index) {
+  %A = alloc()[%s0, %s1] : memref<256x1024xf32, (d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>
+  affine.for %i = 0 to 256 {
+    affine.for %j = 0 to 1024 {
+      // CHECK: memref<256x1024xf32, #map{{[0-9]+}}>
+      affine.load %A[%i, %j] : memref<256x1024xf32, (d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>
+    }
+  }
+  return
+}
-- 
2.7.4
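
Note for reviewers, not part of the patch: the expected shapes in the new
test cases fall out of the bound derivation that normalizeMemRef performs.
The layout map is composed with the logical index space, the old dimensions
are projected out, and each new extent is the resulting constant upper bound
plus one. A sketch in MLIR based on the @strided test above (the before/after
allocs are illustrative, not additional test content):

  // Before: non-identity layout map with strides 2 and 4.
  %A = alloc() : memref<64x128xf32, (d0, d1) -> (2*d0, 4*d1)>

  // 0 <= d0 <= 63 gives 0 <= 2*d0 <= 126, hence extent 127.
  // 0 <= d1 <= 127 gives 0 <= 4*d1 <= 508, hence extent 509.
  // After: identity layout; accesses are remapped through the old map.
  %A = alloc() : memref<127x509xf32>
  affine.load %A[%i * 2, %j * 4] : memref<127x509xf32>

Likewise for @strided_cumulative, the maximum of 3*d0 + 17*d1 over
0 <= d0 <= 1, 0 <= d1 <= 4 is 3 + 68 = 71, hence the linearized
memref<72xf32>. For the semi-affine map in @semi_affine_layout_map,
composeMatchingMap fails, the alloc is left untouched, and the CHECK line
verifies that the memref keeps its #map layout.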