From 89227b670dcf60f1a7e75f57de8f88bad430d5c8 Mon Sep 17 00:00:00 2001 From: Tom Eccles Date: Fri, 2 Jun 2023 16:53:20 +0000 Subject: [PATCH] [flang][hlfir] relax the strictness of intrinsic verifiers The verifiers for hlfir.matmul and hlfir.transpose try to ensure that the shape of the result value makes sense given the shapes of the input argument(s). But there are some cases in the gfortran tests where lowering knows a bit more about shape information than (HL)FIR. I think the cases here will be solved when hlfir.shape_meet is implemented. But in the meantime, and to improve robustness, I've relaxed the verifier to allow the return type to have more precise shape information than can be deduced from the argument type(s). Differential Revision: https://reviews.llvm.org/D152254 --- flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp | 11 ++++++--- flang/test/Lower/HLFIR/matmul.f90 | 38 +++++++++++++++++++++++++++++++ flang/test/Lower/HLFIR/transpose.f90 | 13 +++++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp index c094b66..7a00bfa 100644 --- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp +++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp @@ -778,9 +778,11 @@ mlir::LogicalResult hlfir::MatmulOp::verify() { } if (resultShape.size() != expectedResultShape.size()) return emitOpError("incorrect result shape"); - if (resultShape[0] != expectedResultShape[0]) + if (resultShape[0] != expectedResultShape[0] && + expectedResultShape[0] != unknownExtent) return emitOpError("incorrect result shape"); - if (resultShape.size() == 2 && resultShape[1] != expectedResultShape[1]) + if (resultShape.size() == 2 && resultShape[1] != expectedResultShape[1] && + expectedResultShape[1] != unknownExtent) return emitOpError("incorrect result shape"); return mlir::success(); @@ -852,7 +854,10 @@ mlir::LogicalResult hlfir::TransposeOp::verify() { if (rank != 2 || resultRank != 2) return 
emitOpError("input and output arrays should have rank 2"); - if (inShape[0] != resultShape[1] || inShape[1] != resultShape[0]) + constexpr int64_t unknownExtent = fir::SequenceType::getUnknownExtent(); + if ((inShape[0] != resultShape[1]) && (inShape[0] != unknownExtent)) + return emitOpError("output shape does not match input array"); + if ((inShape[1] != resultShape[0]) && (inShape[1] != unknownExtent)) return emitOpError("output shape does not match input array"); if (eleTy != resultEleTy) diff --git a/flang/test/Lower/HLFIR/matmul.f90 b/flang/test/Lower/HLFIR/matmul.f90 index 93cb700..6e09c18 100644 --- a/flang/test/Lower/HLFIR/matmul.f90 +++ b/flang/test/Lower/HLFIR/matmul.f90 @@ -17,3 +17,41 @@ endsubroutine ! CHECK-NEXT: hlfir.destroy %[[EXPR]] ! CHECK-NEXT: return ! CHECK-NEXT: } + +! regression test for a case where the AST and FIR have different amounts of +! shape inference +subroutine matmul2(c) + integer, parameter :: N = 4 + integer, dimension(:,:), allocatable :: a, b, c + integer, dimension(N,N) :: x + + allocate(a(3*N, N), b(N, N), c(3*N, N)) + + call fill(a) + call fill(b) + call fill(x) + + c = matmul(a, b - x) +endsubroutine +! CHECK-LABEL: func.func @_QPmatmul2 +! CHECK: %[[C_ARG:.*]]: !fir.ref>>> +! CHECK: %[[B_BOX_ALLOC:.*]] = fir.alloca !fir.box>> {bindc_name = "b" +! CHECK: %[[B_BOX_DECL:.*]]:2 = hlfir.declare %[[B_BOX_ALLOC]] {{.*}} uniq_name = "_QFmatmul2Eb" + + +! CHECK: fir.call @_QPfill +! CHECK: fir.call @_QPfill +! CHECK: fir.call @_QPfill +! CHECK-NEXT: %[[B_BOX:.*]] = fir.load %[[B_BOX_DECL]]#0 : !fir.ref>>> +! CHECK-NEXT: %[[C0:.*]] = arith.constant 0 : index +! CHECK-NEXT: %[[B_DIMS_0:.*]]:3 = fir.box_dims %[[B_BOX]], %[[C0]] +! CHECK-NEXT: %[[C1:.*]] = arith.constant 1 : index +! CHECK-NEXT: %[[B_DIMS_1:.*]]:3 = fir.box_dims %[[B_BOX]], %[[C1]] +! CHECK-NEXT: %[[B_SHAPE:.*]] = fir.shape %[[B_DIMS_0]]#1, %[[B_DIMS_1]]#1 +! CHECK-NEXT: %[[ELEMENTAL:.*]] = hlfir.elemental %[[B_SHAPE]] : (!fir.shape<2>) -> !hlfir.expr { + +! 
CHECK: } +! CHECK-NEXT: %[[A_BOX:.*]] = fir.load %{{.*}} : !fir.ref>>> + +! The shapes in these types are what is being tested: +! CHECK-NEXT: %[[MATMUL:.*]] = hlfir.matmul %[[A_BOX]] %[[ELEMENTAL]] {{.*}} : (!fir.box>>, !hlfir.expr) -> !hlfir.expr diff --git a/flang/test/Lower/HLFIR/transpose.f90 b/flang/test/Lower/HLFIR/transpose.f90 index 05a57e0..56a4c83 100644 --- a/flang/test/Lower/HLFIR/transpose.f90 +++ b/flang/test/Lower/HLFIR/transpose.f90 @@ -15,3 +15,16 @@ endsubroutine ! CHECK-NEXT: hlfir.destroy %[[EXPR]] ! CHECK-NEXT: return ! CHECK-NEXT: } + +! test the case where lowering has more exact information about the output +! shape than is available from the argument +subroutine transpose2(a, out) + real, allocatable, dimension(:) :: a + real, dimension(:,:) :: out + integer, parameter :: N = 3 + integer, parameter :: M = 4 + + allocate(a(N*M)) + out = transpose(reshape(a, (/N, M/))) +end subroutine +! CHECK-LABEL: func.func @_QPtranspose2( -- 2.7.4