}
if (local_dep.isDef()) {
- CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
+ // For masked load/store intrinsics, the local_dep may actually be
+ // a normal load or store instruction.
+ CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
- if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
+ if (!local_cdep ||
+ local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
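
The guard matters because cast<CallInst> asserts when the value is not actually a CallInst, while dyn_cast<CallInst> returns nullptr instead. A minimal sketch of the distinction, using a hypothetical helper argCountOrZero and parameter DepInst that are not part of the patch:

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    unsigned argCountOrZero(Instruction *DepInst) {
      // cast<CallInst>(DepInst) would assert here if DepInst were a plain
      // LoadInst or StoreInst; dyn_cast detects that case so we can bail out.
      if (auto *Call = dyn_cast<CallInst>(DepInst))
        return Call->getNumArgOperands();
      return 0; // Not a call: the caller assigns a fresh value number.
    }
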
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -gvn -S < %s | FileCheck %s
+@file_mask = external global [8 x i64], align 32
+
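+; The plain i64 load below reads the same address as the masked loads, so
+; memory dependence analysis can report it (a Def that is not a CallInst)
+; as the second masked load's local dependency; GVN used to crash on it.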
+define fastcc void @test() {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[WIDE_MASKED_LOAD_1_I:%.*]] = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef)
+; CHECK-NEXT: unreachable
+;
+entry:
+ %wide.masked.load.1.i = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef) #2
+ %.pre392.i = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7), align 8
+ %or156.4.i = or i64 %.pre392.i, undef
+ %wide.masked.load614.1.i = tail call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* nonnull bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @file_mask, i64 0, i64 7) to <4 x i64>*), i32 8, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef) #2
+ unreachable
+}
+
+; Function Attrs: argmemonly nounwind readonly willreturn
+declare <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>*, i32 immarg, <4 x i1>, <4 x i64>)