From 84eed7843e61e8c24a03deb3dd46eeccfc906373 Mon Sep 17 00:00:00 2001
From: Manish Gupta
Date: Thu, 6 Apr 2023 23:58:17 +0000
Subject: [PATCH] [Updated commit] Fix Transpose Check in MMA.SYNC Path.

Pushed a stale commit for the same review in my previous commit. I am
updating the mainline with the latest commit, which includes the review
comments. Apologies for the redundant commit.

Differential Revision: https://reviews.llvm.org/D147749
---
 mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp    | 33 +++++++++-----
 .../VectorToGPU/vector-to-mma-ops-mma-sync.mlir    | 50 +++++++++++++++++++++-
 2 files changed, 71 insertions(+), 12 deletions(-)

diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 10a6ee4..7d643ff 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -654,27 +654,32 @@ convertConstantOpMmaSync(RewriterBase &rewriter, arith::ConstantOp op,
 /// Transposed Map Example:
 /// Example 1 : (..., d0, d1) -> (d1 * 1, d0 * 2)
 /// Example 2 : (d0, d1, d2, d3) -> (d3, d2)
-///
 /// The code below checks if the output 2D is transposed using a generalized
 /// version   : (d0, d1, dn, ..., dm, ...) -> (dm, dn)
 /// Returns   : true; if m > n, false o.w.
-
-static bool isTransposed(vector::TransferReadOp op) {
+static FailureOr<bool> isTransposed(vector::TransferReadOp op) {
   mlir::AffineMap map = op.getPermutationMap();
+
   if (map.getNumResults() != 2) {
-    op->emitError("Expected 2D transfer read");
+    LLVM_DEBUG(DBGS() << "Failed because the result of `vector.transfer_read` "
+                         "is not a 2d operand\n");
+    return failure();
   }

   // Output 2D matrix dimensions in the order of d0, d1.
-  auto dM = map.getResult(0);
-  auto dN = map.getResult(1);
+  mlir::AffineExpr dM = map.getResult(0);
+  mlir::AffineExpr dN = map.getResult(1);

   // Find the position of these expressions in the input.
   auto exprM = dM.dyn_cast<AffineDimExpr>();
   auto exprN = dN.dyn_cast<AffineDimExpr>();
+
   if (!exprM || !exprN) {
-    op->emitError("Expected to find AffineDimExpr in vector::TransferReadOp");
+    LLVM_DEBUG(DBGS() << "Failed because expressions are not affine dim "
+                         "expressions, then transpose cannot be determined.\n");
+    return failure();
   }
+
   return exprM.getPosition() > exprN.getPosition();
 }

@@ -699,9 +704,15 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op,
     return rewriter.notifyMatchFailure(op, "not mma sync reg info");
   }

+  FailureOr<bool> transpose = isTransposed(op);
+  if (failed(transpose)) {
+    LLVM_DEBUG(DBGS() << "failed to determine the transpose\n");
+    return rewriter.notifyMatchFailure(
+        op, "Op should likely not be converted to a nvgpu.ldmatrix call.");
+  }
+
   FailureOr<nvgpu::LdMatrixParams> params =
-      nvgpu::getLdMatrixParams(*warpMatrixInfo,
-                               /*transpose=*/isTransposed(op));
+      nvgpu::getLdMatrixParams(*warpMatrixInfo, *transpose);

   if (failed(params)) {
     LLVM_DEBUG(
@@ -727,9 +738,9 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op,
   SmallVector<Value, 4> indices;
   getXferIndices<vector::TransferReadOp>(rewriter, op, *offsets, {laneId},
                                          indices);
+
   nvgpu::LdMatrixOp newOp = rewriter.create<nvgpu::LdMatrixOp>(
-      loc, vectorType, op.getSource(), indices,
-      /*transpose=*/isTransposed(op), params->numTiles);
+      loc, vectorType, op.getSource(), indices, *transpose, params->numTiles);
   valueMapping[op] = newOp->getResult(0);
   return success();
 }
diff --git a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops-mma-sync.mlir b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops-mma-sync.mlir
index 331cd85f..4465819 100644
--- a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops-mma-sync.mlir
+++ b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops-mma-sync.mlir
@@ -213,7 +213,6 @@ func.func @m16n8k16_fp16_row_row_row(%arg0: memref<20x20xf16, #gpu.address_spac

 // CHECK-LABEL: func @m16n16k16_mmasync16816_fp16_f16_row_row_row
 func.func @m16n16k16_mmasync16816_fp16_f16_row_row_row(%arg0: memref<42x32xf16, #gpu.address_space<workgroup>>, %arg1: memref<32x64xf16, #gpu.address_space<workgroup>>, %arg2: memref<42x64xf16, #gpu.address_space<workgroup>>) {
-  %cst_0 = arith.constant dense<0.000000e+00> : vector<16x8xf16>
   %c0 = arith.constant 0 : index
   %c8 = arith.constant 8 : index
   %cst = arith.constant 0.000000e+00 : f16
@@ -253,6 +252,55 @@ func.func @m16n16k16_mmasync16816_fp16_f16_row_row_row(%arg0: memref<42x32xf16,
 }

 // -----

+//#################################################################################################################
+// FP16 row-row-row (Determine the transpose for multi-dimensional vector.transfer_read in vector-to-gpu lowering)
+//#################################################################################################################
+
+// CHECK-DAG: [[$strided_map:#.+]] = affine_map<()[s0] -> (s0 mod 16)>
+// CHECK-DAG: [[$contiguous_map:#.+]] = affine_map<()[s0] -> ((s0 floordiv 16) * 8)>
+
+#map0 = affine_map<(d0, d1, d2) -> (d2, d1)>
+#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
+#map2 = affine_map<(d0, d1, d2) -> (d1, d2)>
+#map3 = affine_map<(d0, d1, d2) -> (d0, d1)>
+#map_a = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
+#map_b = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
+
+// CHECK-LABEL: func @multi_dim_m16n8k16_fp16_row_row_row
+func.func @multi_dim_m16n8k16_fp16_row_row_row(%arg0: memref<4x32x1x32xf16, #gpu.address_space<workgroup>>, %arg1: memref<4x1x32x32xf16, #gpu.address_space<workgroup>>, %arg2: memref<1x32x40xf16, #gpu.address_space<workgroup>>) {
+
+  // CHECK-DAG: [[c0:%.+]] = arith.constant 0 : index
+  %c0 = arith.constant 0 : index
+  %cst = arith.constant 0.000000e+00 : f16
+
+  // CHECK-DAG: [[m_coord:%.+]] = affine.apply [[$strided_map]]
+  // CHECK-DAG: [[k_coord:%.+]] = affine.apply [[$contiguous_map]]
+  // CHECK: [[fragmentA:%.+]] = nvgpu.ldmatrix %arg0[[[c0]], [[m_coord]], [[c0]], [[k_coord]]] {numTiles = 4 : i32, transpose = false}
+  %A = vector.transfer_read %arg0[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true], permutation_map = #map_a} : memref<4x32x1x32xf16, #gpu.address_space<workgroup>>, vector<16x16xf16>
+
+  // CHECK-DAG: [[n_coord:%.+]] = affine.apply [[$contiguous_map]]
+  // CHECK-DAG: [[k_coord:%.+]] = affine.apply [[$strided_map]]
+  // CHECK-DAG: [[fragmentB:%.+]] = nvgpu.ldmatrix %arg1[[[c0]], [[c0]], [[k_coord]], [[n_coord]]] {numTiles = 4 : i32, transpose = true}
+  %B = vector.transfer_read %arg1[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true], permutation_map = #map_b} : memref<4x1x32x32xf16, #gpu.address_space<workgroup>>, vector<16x16xf16>
+
+  // CHECK-DAG: [[m_coord:%.+]] = affine.apply [[$strided_map]]
+  // CHECK-DAG: [[n_coord:%.+]] = affine.apply [[$contiguous_map]]
+  // CHECK-DAG: [[fragmentC:%.*]] = nvgpu.ldmatrix %arg2[[[c0]], [[m_coord]], [[n_coord]]] {numTiles = 4 : i32, transpose = false}
+  %C = vector.transfer_read %arg2[%c0, %c0, %c0], %cst {in_bounds = [true, true]} : memref<1x32x40xf16, #gpu.address_space<workgroup>>, vector<16x16xf16>
+
+  // CHECK-DAG: [[fragmentB0:%.+]] = vector.extract_strided_slice [[fragmentB]] {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x2xf16> to vector<2x2xf16>
+  // CHECK-DAG: [[fragmentC0:%.+]] = vector.extract_strided_slice [[fragmentC]] {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x2xf16> to vector<2x2xf16>
+  // CHECK: nvgpu.mma.sync([[fragmentA]], [[fragmentB0]], [[fragmentC0]]) {mmaShape = [16, 8, 16]} : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
+  %B0 = vector.extract_strided_slice %B {offsets = [0, 0], sizes = [8, 16], strides = [1, 1]} : vector<16x16xf16> to vector<8x16xf16>
+  %C0 = vector.extract_strided_slice %C {offsets = [0, 0], sizes = [16, 8], strides = [1, 1]} : vector<16x16xf16> to vector<16x8xf16>
+  %D0 = vector.contract {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %A, %B0, %C0 : vector<16x16xf16>, vector<8x16xf16> into vector<16x8xf16>
+  vector.transfer_write %D0, %arg2[%c0, %c0, %c0] {in_bounds = [true, true]} : vector<16x8xf16>, memref<1x32x40xf16, #gpu.address_space<workgroup>>
+
+  return
+}
+
+// -----
+
 // CHECK-DAG: [[$strided_map:#.+]] = affine_map<()[s0] -> (s0 mod 16)>
 // CHECK-DAG: [[$contiguous_map:#.+]] = affine_map<()[s0] -> ((s0 floordiv 16) * 8)>
--
2.7.4
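
For reference, the generalized check in isTransposed() reduces to comparing the
input-dimension positions of the permutation map's two results: for
(d0, d1, dn, ..., dm, ...) -> (dm, dn), the read is transposed iff m > n. Below
is a minimal standalone sketch of that comparison; the isTransposedPositions
helper and the main driver are illustrative stand-ins (not part of
VectorToGPU.cpp), and the positions are assumed to be what
AffineDimExpr::getPosition() would return for the two map results.

#include <cassert>

// Illustrative stand-in for the comparison at the heart of isTransposed():
// given the input-dimension positions (m, n) of the two results of a
// permutation map (d0, d1, dn, ..., dm, ...) -> (dm, dn), the transfer
// read is transposed exactly when m > n.
static bool isTransposedPositions(unsigned m, unsigned n) { return m > n; }

int main() {
  // #map_a = affine_map<(d0, d1, d2, d3) -> (d1, d3)>: positions (1, 3);
  // 1 > 3 is false, so operand A is loaded with transpose = false.
  assert(!isTransposedPositions(1, 3));

  // #map_b = affine_map<(d0, d1, d2, d3) -> (d3, d2)>: positions (3, 2);
  // 3 > 2 is true, so operand B is loaded with transpose = true.
  assert(isTransposedPositions(3, 2));
  return 0;
}

These two cases correspond to the transpose = false / transpose = true
attributes that FileCheck expects on the nvgpu.ldmatrix ops in the
@multi_dim_m16n8k16_fp16_row_row_row test added by this patch.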