SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue) {
- if (Alignment->getType() != IntPtrTy)
- Alignment =
- Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
- if (OffsetValue && OffsetValue->getType() != IntPtrTy)
- OffsetValue =
- Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
- llvm::Value *TheCheck = nullptr;
+ llvm::Value *TheCheck;
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
if (SanOpts.has(SanitizerKind::Alignment)) {
- llvm::Value *PtrIntValue =
- Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
-
- if (OffsetValue) {
- bool IsOffsetZero = false;
- if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
- IsOffsetZero = CI->isZero();
-
- if (!IsOffsetZero)
- PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
- }
-
- llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
- llvm::Value *Mask =
- Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
- llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
- TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+ emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ OffsetValue, TheCheck, Assumption);
}
- llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
- CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
-
- if (!SanOpts.has(SanitizerKind::Alignment))
- return;
- emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
- OffsetValue, TheCheck, Assumption);
}
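For context, a minimal source-level sketch of what reaches this path (the
function name here is made up, not from the patch): __builtin_assume_aligned
lowers through emitAlignmentAssumption, and under -fsanitize=alignment the
TheCheck value threaded above is reused by emitAlignmentAssumptionCheck.

    // Illustrative only: exercises the codegen path restored above.
    double *assume64(double *p) {
      // Produces the ptrtoint/and/icmp + llvm.assume sequence that the
      // regenerated tests below now expect.
      return static_cast<double *>(__builtin_assume_aligned(p, 64));
    }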
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[TMP1]]
//
double *foo(ad_struct& x) {
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[TMP1]]
//
double *goo(ad_struct *x) {
// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[TMP1]]
//
double *bar(aligned_double *x) {
// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[TMP1]]
//
double *car(aligned_double &x) {
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[TMP1]]
//
double *dar(aligned_double *x) {
// CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call double* @_Z3eepv()
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[CALL]], i64 64) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret double* [[CALL]]
//
double *ret() {
// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
-// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
-// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
-// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP9]]
//
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
-// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP14]]
//
// CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
-// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t3_variable(int alignment) {
extern int func(char *c);
-// CHECK-LABEL: @test_array(
+// CHECK-LABEL: define {{[^@]+}}@test_array() #0
// CHECK-NEXT: entry:
// CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 16
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 44
// CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
// CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22
// CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
// CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
// CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
-// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
-// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
-// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
+// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
+// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
+// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
+// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], 63
// CHECK-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[IS_ALIGNED]] to i32
return __builtin_is_aligned(&buf[16], 64);
}
-// CHECK-LABEL: @test_array_should_not_mask(
+// CHECK-LABEL: define {{[^@]+}}@test_array_should_not_mask() #0
// CHECK-NEXT: entry:
// CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 32
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 64
// CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
// CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32
// CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
// CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
// CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
-// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
+// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
+// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
+// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
// CHECK-NEXT: ret i32 1
//
int test_array_should_not_mask(void) {
// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
+// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up
// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
+// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
//
// CHECK-LONG-LABEL: define {{[^@]+}}@align_up
// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
+// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down
// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
+// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
//
// CHECK-LONG-LABEL: define {{[^@]+}}@align_down
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], [[CONV]]
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT: store i32* [[TMP3]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-LABEL: define {{[^@]+}}@test6() #0
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i32* (...) @m2()
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], 12
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 63
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 536870912) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 536870911
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: %[[X_RELOADED:.*]] = load %[[STRUCT_AC_STRUCT]]*, %[[STRUCT_AC_STRUCT]]** %[[STRUCT_AC_STRUCT_ADDR]], align 8
// CHECK: %[[A_ADDR:.*]] = getelementptr inbounds %[[STRUCT_AC_STRUCT]], %[[STRUCT_AC_STRUCT]]* %[[X_RELOADED]], i32 0, i32 0
// CHECK: %[[A:.*]] = load i8**, i8*** %[[A_ADDR]], align 8
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[A]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[A]], i64 2147483648) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[A]]
// CHECK-NEXT: }
#line 100
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RELOADED]], i64 2147483648) ]
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[X_RELOADED]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[ALIGNMENT_RELOADED:.*]] = load i64, i64* %[[ALIGNMENT_ADDR]], align 8
// CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 %[[ALIGNMENT_RELOADED]])
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
- // CHECK-SANITIZE-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 %1) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[X_RETURNED]]
// CHECK-NEXT: }
#line 100
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[X_RETURNED]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]])
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
- // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+ // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 2147483648, i64 42) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[X_RETURNED]]
// CHECK-NEXT: }
#line 100
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8** %[[X_RETURNED]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
// CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, i64* %[[OFFSET_ADDR]], align 8
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
- // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+ // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 %[[OFFSET_RELOADED]]) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8* %[[BITCAST]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
- // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+ // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 42) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8* %[[BITCAST]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
// CHECK-NEXT: ret i8* %[[BITCAST]]
// CHECK-NEXT: }
#line 100
// CHECK-NEXT: %[[DATA_ADDR:.*]] = alloca i8*, align 8
// CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
// CHECK: %[[DATA_RELOADED:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8
- // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64
- // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823
- // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+ // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64
+ // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823
+ // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64, !nosanitize
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
// CHECK-SANITIZE: [[CONT]]:
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[DATA_RELOADED]], i64 1073741824) ]
+ // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
#line 100
#pragma omp for simd aligned(data : 0x40000000)
// CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGN_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret void
//
void t0(int align) {
// CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 7)
-// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 7) ]
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 6
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret void
//
void t1(int align) {
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
// CHECK-LABEL: S8
+// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
+// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
+// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
+// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
+
+// CHECK-DAG: and i64 %{{.+}}, 15
+// CHECK-DAG: icmp eq i64 %{{.+}}, 0
// CHECK-DAG: call void @llvm.assume(i1
+
+// CHECK-DAG: and i64 %{{.+}}, 7
+// CHECK-DAG: icmp eq i64 %{{.+}}, 0
// CHECK-DAG: call void @llvm.assume(i1
+
+// CHECK-DAG: and i64 %{{.+}}, 15
+// CHECK-DAG: icmp eq i64 %{{.+}}, 0
// CHECK-DAG: call void @llvm.assume(i1
+
+// CHECK-DAG: and i64 %{{.+}}, 3
+// CHECK-DAG: icmp eq i64 %{{.+}}, 0
// CHECK-DAG: call void @llvm.assume(i1
struct SS {
SS(): a(0) {}
// CHECK-LABEL: define void @h1
int t = 0;
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
- // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+// CHECK: [[C_PTRINT:%.+]] = ptrtoint
+// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
+// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
+// CHECK: [[A_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
+// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
+// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+
+// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
+// CHECK: [[B_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
+// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+
+// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// do not emit llvm.access.group metadata due to the use of the safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
- // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+// CHECK: [[C_PTRINT:%.+]] = ptrtoint
+// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
+// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
+// CHECK: [[A_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
+// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
+// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+
+// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
+// CHECK: [[B_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
+// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+
+// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// do not emit llvm.access.group metadata due to the use of the safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
- // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
- // CHECK-NEXT: load
-
- // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
- // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
- // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
- // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+// CHECK: [[C_PTRINT:%.+]] = ptrtoint
+// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
+// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
+// CHECK: [[A_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
+// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
+// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
+
+// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
+// CHECK: [[B_PTRINT:%.+]] = ptrtoint
+
+// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
+// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
+// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
+
+// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CK1: define internal void @[[OUTL1]]({{.+}})
// CK1: [[ARRDECAY:%.+]] = getelementptr inbounds [1000 x i32], [1000 x i32]* %{{.+}}, i{{32|64}} 0, i{{32|64}} 0
- // CK1: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRDECAY]], {{i64|i32}} 8) ]
+ // CK1: [[ARR_CAST:%.+]] = ptrtoint i32* [[ARRDECAY]] to i{{32|64}}
+ // CK1: [[MASKED_PTR:%.+]] = and i{{32|64}} [[ARR_CAST]], 7
+ // CK1: [[COND:%.+]] = icmp eq i{{32|64}} [[MASKED_PTR]], 0
+ // CK1: call void @llvm.assume(i1 [[COND]])
// CK1: call void @__kmpc_for_static_init_4(
// CK1: call void {{.+}} @__kmpc_fork_call(
// CK1: call void @__kmpc_for_static_fini(
/// Create an assume intrinsic call that allows the optimizer to
/// assume that the provided condition will be true.
- ///
- /// The optional argument \p OpBundles specifies operand bundles that are
- /// added to the call instruction.
- CallInst *CreateAssumption(Value *Cond,
- ArrayRef<OperandBundleDef> OpBundles = llvm::None);
+ CallInst *CreateAssumption(Value *Cond);
/// Create a call to the experimental.gc.statepoint intrinsic to
/// start a new statepoint sequence.
private:
/// Helper function that creates an assume intrinsic call that
- /// represents an alignment assumption on the provided pointer \p PtrValue
- /// with offset \p OffsetValue and alignment value \p AlignValue.
+ /// represents an alignment assumption on the provided pointer \p PtrValue,
+ /// built from the given \p Mask, \p IntPtrTy, and \p OffsetValue. It can
+ /// sometimes be useful to perform other logic based on this alignment
+ /// check; to that end, the check can also be stored into 'TheCheck'.
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
- Value *PtrValue, Value *AlignValue,
- Value *OffsetValue);
+ Value *PtrValue, Value *Mask,
+ Type *IntPtrTy, Value *OffsetValue,
+ Value **TheCheck);
public:
/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.
///
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
+ ///
+ /// It can sometimes be useful to perform other logic based on this
+ /// alignment check; to that end, the check can also be stored into
+ /// 'TheCheck'.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
unsigned Alignment,
- Value *OffsetValue = nullptr);
+ Value *OffsetValue = nullptr,
+ Value **TheCheck = nullptr);
/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.
///
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
+ /// It can sometimes be useful to perform other logic based on this
+ /// alignment check; to that end, the check can also be stored into
+ /// 'TheCheck'.
+ ///
/// This overload handles the condition where the Alignment is dependent
/// on an existing value rather than a static value.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
Value *Alignment,
- Value *OffsetValue = nullptr);
+ Value *OffsetValue = nullptr,
+ Value **TheCheck = nullptr);
};
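A sketch of how a caller can use the restored out-parameter API, assuming an
in-scope IRBuilder and DataLayout (the wrapper name is hypothetical, not part
of this patch):

    #include "llvm/IR/IRBuilder.h"

    // Hypothetical wrapper: emit a 32-byte alignment assumption and hand
    // the i1 check back so the caller (e.g. a sanitizer) can branch on it.
    static llvm::Value *emitAssume32(llvm::IRBuilderBase &Builder,
                                     const llvm::DataLayout &DL,
                                     llvm::Value *PtrValue) {
      llvm::Value *TheCheck = nullptr;
      Builder.CreateAlignmentAssumption(DL, PtrValue, /*Alignment=*/32,
                                        /*OffsetValue=*/nullptr, &TheCheck);
      return TheCheck; // the "maskcond" i1 that feeds llvm.assume
    }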
/// This provides a uniform API for creating instructions and inserting
Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
if (bundleHasArgument(BOI, ABA_WasOn))
Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
- auto GetArgOr1 = [&](unsigned Idx) -> unsigned {
- if (auto *ConstInt = dyn_cast<ConstantInt>(
- getValueFromBundleOpInfo(Assume, BOI, ABA_Argument + Idx)))
- return ConstInt->getZExtValue();
- return 1;
- };
if (BOI.End - BOI.Begin > ABA_Argument)
- Result.ArgValue = GetArgOr1(0);
- if (Result.AttrKind == Attribute::Alignment)
- if (BOI.End - BOI.Begin > ABA_Argument + 1)
- Result.ArgValue = MinAlign(Result.ArgValue, GetArgOr1(1));
+ Result.ArgValue =
+ cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
+ ->getZExtValue();
return Result;
}
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
IRBuilderBase *Builder,
const Twine &Name = "",
- Instruction *FMFSource = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = {}) {
- CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
+ Instruction *FMFSource = nullptr) {
+ CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
if (FMFSource)
CI->copyFastMathFlags(FMFSource);
return CI;
return createCallHelper(TheFn, Ops, this);
}
-CallInst *
-IRBuilderBase::CreateAssumption(Value *Cond,
- ArrayRef<OperandBundleDef> OpBundles) {
+CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
assert(Cond->getType() == getInt1Ty() &&
"an assumption condition must be of type i1");
Value *Ops[] = { Cond };
Module *M = BB->getParent()->getParent();
Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
- return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
+ return createCallHelper(FnAssume, Ops, this);
}
/// Create a call to a Masked Load intrinsic.
return Fn;
}
-CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
- Value *PtrValue,
- Value *AlignValue,
- Value *OffsetValue) {
- SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
- if (OffsetValue)
- Vals.push_back(OffsetValue);
- OperandBundleDefT<Value *> AlignOpB("align", Vals);
- return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
+CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
+ const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
+ Value *OffsetValue, Value **TheCheck) {
+ Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+ if (OffsetValue) {
+ bool IsOffsetZero = false;
+ if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
+ IsOffsetZero = CI->isZero();
+
+ if (!IsOffsetZero) {
+ if (OffsetValue->getType() != IntPtrTy)
+ OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
+ "offsetcast");
+ PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+ }
+ }
+
+ Value *Zero = ConstantInt::get(IntPtrTy, 0);
+ Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
+ Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+ if (TheCheck)
+ *TheCheck = InvCond;
+
+ return CreateAssumption(InvCond);
}
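In scalar terms the helper above materializes the following check (a minimal
model assuming 64-bit pointers; the names mirror the IR value names):

    #include <cstdint>

    // Subtract the optional offset, mask off the low bits, and compare
    // against zero: "offsetptr", "maskedptr", and "maskcond" above.
    static bool maskCond(uint64_t PtrInt, uint64_t Mask, int64_t Offset) {
      uint64_t OffsetPtr = PtrInt - static_cast<uint64_t>(Offset);
      return (OffsetPtr & Mask) == 0;
    }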
-CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
- Value *PtrValue,
- unsigned Alignment,
- Value *OffsetValue) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(
+ const DataLayout &DL, Value *PtrValue, unsigned Alignment,
+ Value *OffsetValue, Value **TheCheck) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
assert(Alignment != 0 && "Invalid Alignment");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
- Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
- return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
+
+ Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
+ OffsetValue, TheCheck);
}
-CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
- Value *PtrValue,
- Value *Alignment,
- Value *OffsetValue) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(
+ const DataLayout &DL, Value *PtrValue, Value *Alignment,
+ Value *OffsetValue, Value **TheCheck) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
- return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
+ auto *PtrTy = cast<PointerType>(PtrValue->getType());
+ Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+
+ if (Alignment->getType() != IntPtrTy)
+ Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
+ "alignmentcast");
+
+ Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
+
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
+ OffsetValue, TheCheck);
}
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
Assert(Elem.Tag->getKey() == "ignore" ||
Attribute::isExistingAttribute(Elem.Tag->getKey()),
"tags must be valid attribute names");
+ Assert(Elem.End - Elem.Begin <= 2, "too many arguments");
Attribute::AttrKind Kind =
Attribute::getAttrKindFromName(Elem.Tag->getKey());
- unsigned ArgCount = Elem.End - Elem.Begin;
- if (Kind == Attribute::Alignment) {
- Assert(ArgCount <= 3 && ArgCount >= 2,
- "alignment assumptions should have 2 or 3 arguments");
- Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
- "first argument should be a pointer");
- Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
- "second argument should be an integer");
- if (ArgCount == 3)
- Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
- "third argument should be an integer if present");
- return;
- }
- Assert(ArgCount <= 2, "to many arguments");
if (Kind == Attribute::None)
break;
if (Attribute::doesAttrKindHaveArgument(Kind)) {
- Assert(ArgCount == 2, "this attribute should have 2 arguments");
+ Assert(Elem.End - Elem.Begin == 2,
+ "this attribute should have 2 arguments");
Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
"the second argument should be a constant integral value");
} else if (isFuncOnlyAttr(Kind)) {
- Assert((ArgCount) == 0, "this attribute has no argument");
+ Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
} else if (!isFuncOrArgAttr(Kind)) {
- Assert((ArgCount) == 1, "this attribute should have one argument");
+ Assert((Elem.End - Elem.Begin) == 1,
+ "this attribute should have one argument");
}
}
break;
break;
case Intrinsic::assume: {
Value *IIOperand = II->getArgOperand(0);
- SmallVector<OperandBundleDef, 4> OpBundles;
- II->getOperandBundlesAsDefs(OpBundles);
- bool HasOpBundles = !OpBundles.empty();
// Remove an assume if it is followed by an identical assume.
// TODO: Do we need this? Unless there are conflicting assumptions, the
// computeKnownBits(IIOperand) below here eliminates redundant assumes.
Instruction *Next = II->getNextNonDebugInstruction();
- if (HasOpBundles &&
- match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&
- !cast<IntrinsicInst>(Next)->hasOperandBundles())
+ if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
return eraseInstFromFunction(CI);
// Canonicalize assume(a && b) -> assume(a); assume(b);
Value *AssumeIntrinsic = II->getCalledOperand();
Value *A, *B;
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
- Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
- II->getName());
+ Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
return eraseInstFromFunction(*II);
}
// assume(!(a || b)) -> assume(!a); assume(!b);
if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
- Builder.CreateNot(A), OpBundles, II->getName());
+ Builder.CreateNot(A), II->getName());
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
Builder.CreateNot(B), II->getName());
return eraseInstFromFunction(*II);
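At the source level the two rewrites above amount to splitting conjunctive
assumptions; a sketch using clang's __builtin_assume (illustrative only):

    void split(bool a, bool b) {
      __builtin_assume(a && b);    // InstCombine: assume(a); assume(b);
      __builtin_assume(!(a || b)); // InstCombine: assume(!a); assume(!b);
    }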
isValidAssumeForContext(II, LHS, &DT)) {
MDNode *MD = MDNode::get(II->getContext(), None);
LHS->setMetadata(LLVMContext::MD_nonnull, MD);
- if (!HasOpBundles)
- return eraseInstFromFunction(*II);
+ return eraseInstFromFunction(*II);
// TODO: apply nonnull return attributes to calls and invokes
// TODO: apply range metadata for range check patterns?
Value *&AAPtr,
const SCEV *&AlignSCEV,
const SCEV *&OffSCEV) {
- Type *Int64Ty = Type::getInt64Ty(I->getContext());
- Optional<OperandBundleUse> AlignOB = I->getOperandBundle("align");
- if (AlignOB.hasValue()) {
- assert(AlignOB.getValue().Inputs.size() >= 2);
- AAPtr = AlignOB.getValue().Inputs[0].get();
- // TODO: Consider accumulating the offset to the base.
- AAPtr = AAPtr->stripPointerCastsSameRepresentation();
- AlignSCEV = SE->getSCEV(AlignOB.getValue().Inputs[1].get());
- AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
- if (AlignOB.getValue().Inputs.size() == 3)
- OffSCEV = SE->getSCEV(AlignOB.getValue().Inputs[2].get());
- else
- OffSCEV = SE->getZero(Int64Ty);
- OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
- return true;
+ // An alignment assume must be a statement about the least-significant
+ // bits of the pointer being zero, possibly with some offset.
+ ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
+ if (!ICI)
+ return false;
+
+ // This must be an expression of the form: x & m == 0.
+ if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
+ return false;
+
+ // Swap things around so that the RHS is 0.
+ Value *CmpLHS = ICI->getOperand(0);
+ Value *CmpRHS = ICI->getOperand(1);
+ const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
+ const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
+ if (CmpLHSSCEV->isZero())
+ std::swap(CmpLHS, CmpRHS);
+ else if (!CmpRHSSCEV->isZero())
+ return false;
+
+ BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
+ if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
+ return false;
+
+ // Swap things around so that the right operand of the and is a constant
+ // (the mask); we cannot deal with variable masks.
+ Value *AndLHS = CmpBO->getOperand(0);
+ Value *AndRHS = CmpBO->getOperand(1);
+ const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
+ const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
+ if (isa<SCEVConstant>(AndLHSSCEV)) {
+ std::swap(AndLHS, AndRHS);
+ std::swap(AndLHSSCEV, AndRHSSCEV);
}
- return false;
+
+ const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
+ if (!MaskSCEV)
+ return false;
+
+ // The mask must have some trailing ones (otherwise the condition is
+ // trivial and tells us nothing about the alignment of the left operand).
+ unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
+ if (!TrailingOnes)
+ return false;
+
+ // Cap the alignment at the maximum with which LLVM can deal (and make sure
+ // we don't overflow the shift).
+ uint64_t Alignment;
+ TrailingOnes = std::min(TrailingOnes,
+ unsigned(sizeof(unsigned) * CHAR_BIT - 1));
+ Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);
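+ // For example, a mask of 31 (0b11111) has five trailing ones, giving an
+ // alignment of 1u << 5 == 32 bytes.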
+
+ Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
+ AlignSCEV = SE->getConstant(Int64Ty, Alignment);
+
+ // The LHS might be a ptrtoint instruction, or it might be the pointer
+ // with an offset.
+ AAPtr = nullptr;
+ OffSCEV = nullptr;
+ if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
+ AAPtr = PToI->getPointerOperand();
+ OffSCEV = SE->getZero(Int64Ty);
+ } else if (const SCEVAddExpr* AndLHSAddSCEV =
+ dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
+ // Try to find the ptrtoint; subtract it and the rest is the offset.
+ for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
+ JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
+ if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
+ if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
+ AAPtr = PToI->getPointerOperand();
+ OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
+ break;
+ }
+ }
+
+ if (!AAPtr)
+ return false;
+
+ // Sign-extend the offset to 64 bits so that it has the same width as the
+ // other SCEV expressions.
+ unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
+ if (OffSCEVBits < 64)
+ OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
+ else if (OffSCEVBits > 64)
+ return false;
+
+ AAPtr = AAPtr->stripPointerCasts();
+ return true;
}
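// As a sketch (with hypothetical value names), the IR shape recovered here
// looks like:
//   %ptrint    = ptrtoint i32* %p to i64
//   %offsetptr = add i64 %ptrint, 24        ; optional offset term
//   %maskedptr = and i64 %offsetptr, 31     ; mask == alignment - 1
//   %maskcond  = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
// which yields AAPtr == %p, AlignSCEV == 32, and OffSCEV == 24.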
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
continue;
if (Instruction *K = dyn_cast<Instruction>(J))
+ if (isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
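// Contexts are validated as users are queued (here and again below), so
// every instruction popped from the worklist is already known to be a
// valid context for this assume.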
while (!WorkList.empty()) {
Instruction *J = WorkList.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
- if (!isValidAssumeForContext(ACall, J, DT))
- continue;
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
LI->getPointerOperand(), SE);
if (NewAlignment > LI->getAlign()) {
++NumLoadAlignChanged;
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
- if (!isValidAssumeForContext(ACall, J, DT))
- continue;
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
SI->getPointerOperand(), SE);
if (NewAlignment > SI->getAlign()) {
++NumStoreAlignChanged;
}
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
- if (!isValidAssumeForContext(ACall, J, DT))
- continue;
Align NewDestAlignment =
getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);
Visited.insert(J);
for (User *UJ : J->users()) {
Instruction *K = cast<Instruction>(UJ);
- if (!Visited.count(K))
+ if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
}
define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
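+ ; the 31-mask assume encodes a 32-byte alignment guarantee on %a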
%0 = load i32, i32* %a, align 4
ret i32 %0
define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 24)]
+ %ptrint = ptrtoint i32* %a to i64
+ %offsetptr = add i64 %ptrint, 24
+ %maskedptr = and i64 %offsetptr, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0
define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 28)]
+ %ptrint = ptrtoint i32* %a to i64
+ %offsetptr = add i64 %ptrint, 28
+ %maskedptr = and i64 %offsetptr, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0
define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 0)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%0 = load i32, i32* %a, align 4
ret i32 %0
define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i32 0)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
; load(a, i0+i1+i2+32)
define void @hoo2(i32* nocapture %a, i64 %id, i64 %num) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i64 0)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%id.mul = shl nsw i64 %id, 6
%num.mul = shl nsw i64 %num, 6
br label %for0.body
define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
entry:
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
%0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i128 32, i128 0)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
define i32 @moo(i32* nocapture %a) nounwind uwtable {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %a, i16 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%0 = bitcast i32* %a to i8*
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
ret i32 undef
define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
entry:
- tail call void @llvm.assume(i1 true) ["align"(i32* %b, i32 128)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
+ %ptrint1 = ptrtoint i32* %b to i64
+ %maskedptr3 = and i64 %ptrint1, 127
+ %maskcond4 = icmp eq i64 %maskedptr3, 0
+ tail call void @llvm.assume(i1 %maskcond4)
%0 = bitcast i32* %a to i8*
- tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32)]
%1 = bitcast i32* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
ret i32 undef
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: define {{[^@]+}}@foo2
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 24) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 24
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 24)]
+ %ptrint = ptrtoint i32* %a to i64
+ %offsetptr = add i64 %ptrint, 24
+ %maskedptr = and i64 %offsetptr, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
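+ ; %a + 24 == 0 (mod 32) gives %a == 8 (mod 32); element 2 sits at
+ ; %a + 8 == 16 (mod 32), matching the align 16 in the checks above.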
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: define {{[^@]+}}@foo2a
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 28) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 28
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 28)]
+ %ptrint = ptrtoint i32* %a to i64
+ %offsetptr = add i64 %ptrint, 28
+ %maskedptr = and i64 %offsetptr, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
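+ ; %a + 28 == 0 (mod 32) gives %a == 4 (mod 32); element -1 sits at
+ ; %a - 4 == 0 (mod 32), matching the align 32 in the checks above.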
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: define {{[^@]+}}@goo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: define {{[^@]+}}@hoo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
; CHECK-LABEL: define {{[^@]+}}@joo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
; CHECK-LABEL: define {{[^@]+}}@koo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
; CHECK-LABEL: define {{[^@]+}}@koo2
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
br label %for.body
for.body: ; preds = %entry, %for.body
; CHECK-LABEL: define {{[^@]+}}@moo
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #1
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false)
; CHECK-NEXT: ret i32 undef
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
%0 = bitcast i32* %a to i8*
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
ret i32 undef
; CHECK-LABEL: define {{[^@]+}}@moo2
; CHECK-SAME: (i32* nocapture [[A:%.*]], i32* nocapture [[B:%.*]]) #1
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[B]], i64 128) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i32* [[B]] to i64
+; CHECK-NEXT: [[MASKEDPTR3:%.*]] = and i64 [[PTRINT1]], 127
+; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to i8*
; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false)
; CHECK-NEXT: ret i32 undef
;
entry:
- call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]
- call void @llvm.assume(i1 true) ["align"(i32* %b, i64 128)]
+ %ptrint = ptrtoint i32* %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
+ %ptrint1 = ptrtoint i32* %b to i64
+ %maskedptr3 = and i64 %ptrint1, 127
+ %maskcond4 = icmp eq i64 %maskedptr3, 0
+ tail call void @llvm.assume(i1 %maskcond4)
%0 = bitcast i32* %a to i8*
%1 = bitcast i32* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
; CHECK-LABEL: define {{[^@]+}}@foo2
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[B]], i64 128) ]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint float* [[B]] to i64
+; CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 127
+; CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
define void @debug_interference(i8 %x) {
; CHECK-LABEL: @debug_interference(
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 false)
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9
; CHECK-NEXT: tail call void @llvm.assume(i1 false)
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9
; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; ASSUMPTIONS-ON: false1:
; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8
-; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
+; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
+; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
+; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: ret void
; ASSUMPTIONS-ON: true2.critedge:
-; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
+; ASSUMPTIONS-ON-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
+; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
+; ASSUMPTIONS-ON-NEXT: [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
+; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND_C]])
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-OFF-NEXT: ret void
;
; ASSUMPTIONS-ON-LABEL: @caller2(
+; ASSUMPTIONS-ON-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
+; ASSUMPTIONS-ON-NEXT: [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
+; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
+; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
+; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: ret void
;
%alloca = alloca i64, align 8, addrspace(5)
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: not opt -verify < %s 2>&1 | FileCheck %s
declare void @llvm.assume(i1)
; CHECK: tags must be valid attribute names
call void @llvm.assume(i1 true) ["adazdazd"()]
; CHECK: the second argument should be a constant integral value
- call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 %P1)]
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1)]
; CHECK: to many arguments
- call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 8, i32 8)]
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 8)]
; CHECK: this attribute should have 2 arguments
- call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P)]
+ call void @llvm.assume(i1 true) ["align"(i32* %P)]
; CHECK: this attribute has no argument
- call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 4), "cold"(i32* %P)]
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 4), "cold"(i32* %P)]
; CHECK: this attribute should have one argument
call void @llvm.assume(i1 true) ["noalias"()]
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4)]
-; CHECK: alignment assumptions should have 2 or 3 arguments
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4, i32 4)]
-; CHECK: second argument should be an integer
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32* %P2)]
-; CHECK: third argument should be an integer if present
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32* %P2)]
ret void
}
ASSERT_EQ(AR[0].Index, 1u);
ASSERT_EQ(AR[0].Assume, &*First);
}
-
-TEST(AssumeQueryAPI, Alignment) {
- LLVMContext C;
- SMDiagnostic Err;
- std::unique_ptr<Module> Mod = parseAssemblyString(
- "declare void @llvm.assume(i1)\n"
- "define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "
- "%I3)]\n"
- "call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"
- "ret void\n}\n",
- Err, C);
- if (!Mod)
- Err.print("AssumeQueryAPI", errs());
-
- Function *F = Mod->getFunction("test");
- BasicBlock::iterator Start = F->begin()->begin();
- IntrinsicInst *II;
- RetainedKnowledge RK;
- II = cast<IntrinsicInst>(&*Start);
- RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
- ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
- ASSERT_EQ(RK.WasOn, F->getArg(0));
- ASSERT_EQ(RK.ArgValue, 1u);
- Start++;
- II = cast<IntrinsicInst>(&*Start);
- RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
- ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
- ASSERT_EQ(RK.WasOn, F->getArg(1));
- ASSERT_EQ(RK.ArgValue, 1u);
- Start++;
- II = cast<IntrinsicInst>(&*Start);
- RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
- ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
- ASSERT_EQ(RK.WasOn, F->getArg(2));
- ASSERT_EQ(RK.ArgValue, 8u);
-}