From: Philip Reames Date: Fri, 10 Jun 2022 20:15:11 +0000 (-0700) Subject: [RISCV] Implement isElementTypeLegalForScalableVector TTI hook X-Git-Tag: upstream/15.0.7~5148 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f7bb691d6140e83699a89c4adf67c1d850852a9c;p=platform%2Fupstream%2Fllvm.git [RISCV] Implement isElementTypeLegalForScalableVector TTI hook This brings us into alignment with AArch64, and in the process fixes a compiler crash bug in uniform store handling in the vectorizer. Before the recent invalid cost bailout work, this would have also avoided crashes on invalid costs in some cases. I honestly think the vectorizer should gracefully bailout on uniform stores it can't use a scatter for, but it doesn't, so let's take the path of least resistance here. It's also possible that there are other vectorizer bugs AArch64 isn't seeing because of this hook; we don't want to be finding them either. Differential Revision: https://reviews.llvm.org/D127514 --- diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 03a4caa..6a5092b 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -105,6 +105,10 @@ public: Optional FMF, TTI::TargetCostKind CostKind); + bool isElementTypeLegalForScalableVector(Type *Ty) const { + return TLI->isLegalElementTypeForRVV(Ty); + } + bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) { if (!ST->hasVInstructions()) return false; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/illegal-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/illegal-type.ll new file mode 100644 index 0000000..edac891 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/illegal-type.ll @@ -0,0 +1,170 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -loop-vectorize -mattr=+v -force-vector-width=4 -scalable-vectorization=on -S 2>&1 | FileCheck %s +target 
triple = "riscv64-linux-gnu" + +; +define dso_local void @loop_i128(i128* nocapture %ptr, i64 %N) { +; CHECK-LABEL: @loop_i128( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i128, i128* [[PTR:%.*]], i64 [[IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i128, i128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP0]], 42 +; CHECK-NEXT: store i128 [[ADD]], i128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i128, i128* %ptr, i64 %iv + %0 = load i128, i128* %arrayidx, align 16 + %add = add nsw i128 %0, 42 + store i128 %add, i128* %arrayidx, align 16 + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0 + +for.end: + ret void +} + +define dso_local void @loop_f128(fp128* nocapture %ptr, i64 %N) { +; CHECK-LABEL: @loop_f128( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds fp128, fp128* [[PTR:%.*]], i64 [[IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load fp128, fp128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD:%.*]] = fsub fp128 [[TMP0]], 0xL00000000000000008000000000000000 +; CHECK-NEXT: store fp128 [[ADD]], fp128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq 
i64 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds fp128, fp128* %ptr, i64 %iv + %0 = load fp128, fp128* %arrayidx, align 16 + %add = fsub fp128 %0, 0xL00000000000000008000000000000000 + store fp128 %add, fp128* %arrayidx, align 16 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0 + +for.end: + ret void +} + +define dso_local void @loop_invariant_i128(i128* nocapture %ptr, i128 %val, i64 %N) { +; CHECK-LABEL: @loop_invariant_i128( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i128, i128* [[PTR:%.*]], i64 [[IV]] +; CHECK-NEXT: store i128 [[VAL:%.*]], i128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i128, i128* %ptr, i64 %iv + store i128 %val, i128* %arrayidx, align 16 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0 + +for.end: + ret void +} + +define void @uniform_store_i1(i1* noalias %dst, i64* noalias %start, i64 %N) { +; CHECK-LABEL: @uniform_store_i1( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; 
CHECK-NEXT: [[FIRST_SROA:%.*]] = phi i64* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[START:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[FIRST_SROA]], align 4 +; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i64, i64* [[FIRST_SROA]], i64 1 +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i64* [[INCDEC_PTR]], [[START]] +; CHECK-NEXT: store i1 [[CMP_NOT]], i1* [[DST:%.*]], align 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[END:%.*]], !llvm.loop [[LOOP0]] +; CHECK: end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %first.sroa = phi i64* [ %incdec.ptr, %for.body ], [ %start, %entry ] + %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ] + %iv.next = add i64 %iv, 1 + %0 = load i64, i64* %first.sroa + %incdec.ptr = getelementptr inbounds i64, i64* %first.sroa, i64 1 + %cmp.not = icmp eq i64* %incdec.ptr, %start + store i1 %cmp.not, i1* %dst + %cmp = icmp ult i64 %iv, %N + br i1 %cmp, label %for.body, label %end, !llvm.loop !0 + +end: + ret void +} + +define dso_local void @loop_fixed_width_i128(i128* nocapture %ptr, i64 %N) { +; CHECK-LABEL: @loop_fixed_width_i128( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i128, i128* [[PTR:%.*]], i64 [[IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i128, i128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP0]], 42 +; CHECK-NEXT: store i128 [[ADD]], i128* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; 
CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i128, i128* %ptr, i64 %iv + %0 = load i128, i128* %arrayidx, align 16 + %add = add nsw i128 %0, 42 + store i128 %add, i128* %arrayidx, align 16 + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.end, label %for.body + +for.end: + ret void +} + +!0 = distinct !{!0, !1} +!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}