From: Krzysztof Parzyszek Date: Wed, 9 Sep 2020 23:05:00 +0000 (-0500) Subject: [GVN] Account for masked loads/stores depending on load/store instructions X-Git-Tag: llvmorg-13-init~12456 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8a08740db6e13a3a36363c65b7e270cb7c66eb3c;p=platform%2Fupstream%2Fllvm.git [GVN] Account for masked loads/stores depending on load/store instructions This is a case where an intrinsic depends on a non-call instruction. Differential Revision: https://reviews.llvm.org/D87423 --- diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index 036ca1d..2523cb1 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -410,9 +410,12 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) { } if (local_dep.isDef()) { - CallInst* local_cdep = cast<CallInst>(local_dep.getInst()); + // For masked load/store intrinsics, the local_dep may actually be + // a normal load or store instruction. + CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst()); - if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) { + if (!local_cdep || + local_cdep->getNumArgOperands() != C->getNumArgOperands()) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } diff --git a/llvm/test/Transforms/GVN/masked-load-store-vn-crash.ll b/llvm/test/Transforms/GVN/masked-load-store-vn-crash.ll new file mode 100644 index 0000000..ae8369c --- /dev/null +++ b/llvm/test/Transforms/GVN/masked-load-store-vn-crash.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -gvn -S < %s | FileCheck %s +@file_mask = external global [8 x i64], align 32 + +define fastcc void @test() { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE_MASKED_LOAD_1_I:%.*]] = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>
, <4 x i64> undef) +; CHECK-NEXT: unreachable +; +entry: + %wide.masked.load.1.i = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> undef) #2 + %.pre392.i = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7), align 8 + %or156.4.i = or i64 %.pre392.i, undef + %wide.masked.load614.1.i = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> undef) #2 + unreachable +} + +; Function Attrs: argmemonly nounwind readonly willreturn +declare <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>*, i32 immarg, <4 x i1>, <4 x i64>)