target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @test0(i32* %p1, i8* %p2, i32* %p3, i8* %p4, i8* %p5, i1 %c, i32 %x) {
+define i32 @test0(ptr %p1, ptr %p2, ptr %p3, ptr %p4, ptr %p5, i1 %c, i32 %x) {
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1_1:%.*]] = getelementptr i32, i32* [[P1:%.*]], i64 1
-; CHECK-NEXT: [[P1_2:%.*]] = getelementptr i32, i32* [[P1]], i64 2
-; CHECK-NEXT: [[P1_3:%.*]] = getelementptr i32, i32* [[P1]], i64 3
-; CHECK-NEXT: [[IV_1_START:%.*]] = load i32, i32* [[P1_1]], align 4, !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[IV_1_END:%.*]] = load i32, i32* [[P1_2]], align 4, !range [[RNG0]]
-; CHECK-NEXT: [[IV_2_END:%.*]] = load i32, i32* [[P1_3]], align 4, !range [[RNG0]]
+; CHECK-NEXT: [[P1_1:%.*]] = getelementptr i32, ptr [[P1:%.*]], i64 1
+; CHECK-NEXT: [[P1_2:%.*]] = getelementptr i32, ptr [[P1]], i64 2
+; CHECK-NEXT: [[P1_3:%.*]] = getelementptr i32, ptr [[P1]], i64 3
+; CHECK-NEXT: [[IV_1_START:%.*]] = load i32, ptr [[P1_1]], align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[IV_1_END:%.*]] = load i32, ptr [[P1_2]], align 4, !range [[RNG0]]
+; CHECK-NEXT: [[IV_2_END:%.*]] = load i32, ptr [[P1_3]], align 4, !range [[RNG0]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_2_END]], [[IV_1_END]]
; CHECK-NEXT: br i1 [[LOOP_COND]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: loop.preheader:
; CHECK: loop:
; CHECK-NEXT: [[IV_1:%.*]] = phi i32 [ [[IV_1_NEXT:%.*]], [[LOOP_NEXT:%.*]] ], [ [[IV_1_START]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_NEXT:%.*]], [[LOOP_NEXT]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i8, i8* [[P2:%.*]], i32 [[IV_1]]
-; CHECK-NEXT: [[VALUE:%.*]] = load i8, i8* [[GEP_1]], align 1
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i8, ptr [[P2:%.*]], i32 [[IV_1]]
+; CHECK-NEXT: [[VALUE:%.*]] = load i8, ptr [[GEP_1]], align 1
; CHECK-NEXT: [[COND_1:%.*]] = icmp ult i32 [[IV_1]], [[IV_1_END]]
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[TMP5:%.*]] = and i1 [[TMP4]], [[WC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[LOOP_NEXT]], label [[DEOPT:%.*]]
; CHECK: loop.next:
; CHECK-NEXT: call void @llvm.assume(i1 [[COND_1]])
-; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, i8* [[P4:%.*]], i32 [[IV_1]]
-; CHECK-NEXT: store i8 [[VALUE]], i8* [[GEP_3]], align 1
+; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, ptr [[P4:%.*]], i32 [[IV_1]]
+; CHECK-NEXT: store i8 [[VALUE]], ptr [[GEP_3]], align 1
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i32 [[IV_2]], 1
; CHECK-NEXT: [[LATCH_COND:%.*]] = icmp ult i32 [[IV_2]], [[IV_2_END]]
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
- %p1.1 = getelementptr i32, i32* %p1, i64 1
- %p1.2 = getelementptr i32, i32* %p1, i64 2
- %p1.3 = getelementptr i32, i32* %p1, i64 3
- %iv.1.start = load i32, i32* %p1.1, !range !0
- %iv.1.end = load i32, i32* %p1.2, !range !0
- %iv.2.end = load i32, i32* %p1.3, !range !0
+ %p1.1 = getelementptr i32, ptr %p1, i64 1
+ %p1.2 = getelementptr i32, ptr %p1, i64 2
+ %p1.3 = getelementptr i32, ptr %p1, i64 3
+ %iv.1.start = load i32, ptr %p1.1, !range !0
+ %iv.1.end = load i32, ptr %p1.2, !range !0
+ %iv.2.end = load i32, ptr %p1.3, !range !0
%loop.cond = icmp ult i32 %iv.2.end, %iv.1.end
br i1 %loop.cond, label %loop, label %exit
loop:
%iv.1 = phi i32 [ %iv.1.start, %entry ], [ %iv.1.next, %latch ]
%iv.2 = phi i32 [ 0, %entry ], [ %iv.2.next, %latch ]
- %gep.1 = getelementptr i8, i8* %p2, i32 %iv.1
- %value = load i8, i8* %gep.1
+ %gep.1 = getelementptr i8, ptr %p2, i32 %iv.1
+ %value = load i8, ptr %gep.1
%cond.1 = icmp ult i32 %iv.1, %iv.1.end
%wc = call i1 @llvm.experimental.widenable.condition()
%explicit_guard_cond = and i1 %cond.1, %wc
br i1 %cond.1, label %if.true, label %if.false
if.true:
- %gep.3 = getelementptr i8, i8* %p4, i32 %iv.1
- store i8 %value, i8* %gep.3
+ %gep.3 = getelementptr i8, ptr %p4, i32 %iv.1
+ store i8 %value, ptr %gep.3
br label %latch
if.false:
- %gep.4 = getelementptr i8, i8* %p4, i32 %iv.2
- store i8 %value, i8* %gep.4
+ %gep.4 = getelementptr i8, ptr %p4, i32 %iv.2
+ store i8 %value, ptr %gep.4
br label %latch
latch:
declare void @llvm.experimental.guard(i1, ...)
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ule_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ule i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ugt_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ugt_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %length.ptr, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check_length_range_known(ptr %array, ptr %length.ptr, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check_length_range_known(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: br i1 [[TMP5]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = icmp sle i32 [[N]], [[LENGTH]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
;
entry:
%tmp5 = icmp sle i32 %n, 0
- %length = load i32, i32* %length.ptr, !range !{i32 1, i32 2147483648}
+ %length = load i32, ptr %length.ptr, !range !{i32 1, i32 2147483648}
br i1 %tmp5, label %exit, label %loop.preheader
loop.preheader:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_inverse_latch_predicate(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_inverse_latch_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp slt i32 %i, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[I_NEXT_OFFSET:%.*]] = add i32 [[I_NEXT]], 1
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
ret i32 %result
}
-define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n) {
+define i32 @unsupported_latch_pred_loop_0_to_n(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsupported_latch_pred_loop_0_to_n(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ne i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_unsupported_iv_step(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_unsupported_iv_step(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 2
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 2
ret i32 %result
}
-define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_equal_iv_range_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_equal_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 1
ret i32 %result
}
-define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start.i,
+define i32 @signed_loop_start_to_n_offset_iv_range_check(ptr %array, i32 %start.i,
; CHECK-LABEL: @signed_loop_start_to_n_offset_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP4]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i32 %j, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_types(ptr %array, i16 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_types(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i16 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i16 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i16 %j, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_strides(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_strides(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 2
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 2
ret i32 %result
}
-define i32 @two_range_checks(i32* %array.1, i32 %length.1,
+define i32 @two_range_checks(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP6]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2, i32 %n) {
+ ptr %array.2, i32 %length.2, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.1, %array.2.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @three_range_checks(i32* %array.1, i32 %length.1,
+define i32 @three_range_checks(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @three_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP10]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2,
- i32* %array.3, i32 %length.3, i32 %n) {
+ ptr %array.2, i32 %length.2,
+ ptr %array.3, i32 %length.3, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @three_guards(i32* %array.1, i32 %length.1,
+define i32 @three_guards(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @three_guards(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_1]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_2:%.*]] = icmp ult i32 [[I]], [[LENGTH_2]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP5]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_2]])
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_3:%.*]] = icmp ult i32 [[I]], [[LENGTH_3]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP8]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_3]])
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2,
- i32* %array.3, i32 %length.3, i32 %n) {
+ ptr %array.2, i32 %length.2,
+ ptr %array.3, i32 %length.3, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.1, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
%within.bounds.2 = icmp ult i32 %i, %length.2
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.2, i32 9) [ "deopt"() ]
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
%within.bounds.3 = icmp ult i32 %i, %length.3
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.3, i32 9) [ "deopt"() ]
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @unsigned_loop_0_to_n_unrelated_condition(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @unsigned_loop_0_to_n_unrelated_condition(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
}
; Don't change the guard condition if there were no widened subconditions
-define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
+define i32 @test_no_widened_conditions(ptr %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: @test_no_widened_conditions(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[GUARD_COND:%.*]] = and i1 [[UNRELATED_COND_AND_1]], [[UNRELATED_COND_3]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[GUARD_COND]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_loop_variant_bound(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_loop_variant_bound(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[BOUND]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_non_monotonic_predicate(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_non_monotonic_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[GUARD_COND:%.*]] = icmp eq i32 [[I]], [[X:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[GUARD_COND]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32 %n) {
+define i32 @unsigned_loop_0_to_n_hoist_length(ptr %array, i16 %length.i16, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32 %divider, i32 %n) {
+define i32 @unsigned_loop_0_to_n_cant_hoist_length(ptr %array, i32 %length, i32 %divider, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_cant_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
; This is a case where the length information tells us that the guard
; must trigger on some iteration.
-define i32 @provably_taken(i32* %array, i32* %length.ptr) {
+define i32 @provably_taken(ptr %array, ptr %length.ptr) {
; CHECK-LABEL: @provably_taken(
; CHECK-NEXT: loop.preheader:
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG1:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 0, [[LENGTH]]
; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], false
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], 200
; CHECK-NEXT: ret i32 [[RESULT]]
;
loop.preheader:
- %length = load i32, i32* %length.ptr, !range !{i32 0, i32 50}
+ %length = load i32, ptr %length.ptr, !range !{i32 0, i32 50}
br label %loop
loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
; NE Check (as produced by LFTR) where we can prove Start < End via simple
; instruction analysis
-define i32 @ne_latch_zext(i32* %array, i32 %length, i16 %n16) {
+define i32 @ne_latch_zext(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @ne_latch_zext(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
}
; Same as previous, but with a pre-increment test since this is easier to match
-define i32 @ne_latch_zext_preinc(i32* %array, i32 %length, i16 %n16) {
+define i32 @ne_latch_zext_preinc(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @ne_latch_zext_preinc(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
; NE Check (as produced by LFTR) where we can prove Start < End via the
; condition guarding the loop entry.
-define i32 @ne_latch_dom_check(i32* %array, i32 %length, i32 %n) {
+define i32 @ne_latch_dom_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @ne_latch_dom_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
}
; Same as previous, but easier to match
-define i32 @ne_latch_dom_check_preinc(i32* %array, i32 %length, i32 %n) {
+define i32 @ne_latch_dom_check_preinc(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @ne_latch_dom_check_preinc(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
}
; Same as previous, except swapped br/cmp
-define i32 @eq_latch_dom_check_preinc(i32* %array, i32 %length, i32 %n) {
+define i32 @eq_latch_dom_check_preinc(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @eq_latch_dom_check_preinc(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; NE latch - can't prove (end-start) mod step == 0 (i.e. might wrap
; around several times or even be infinite)
-define i32 @neg_ne_latch_mod_step(i32* %array, i32 %length, i16 %n16) {
+define i32 @neg_ne_latch_mod_step(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @neg_ne_latch_mod_step(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
}
; NE latch - TODO: could prove (end-start) mod step == 0
-define i32 @ne_latch_mod_step(i32* %array, i32 %length) {
+define i32 @ne_latch_mod_step(ptr %array, i32 %length) {
; CHECK-LABEL: @ne_latch_mod_step(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
}
; NE Latch - but end > start so wraps around and not equivelent to a ult
-define i32 @neg_ne_latch_swapped_order(i32* %array, i32 %length) {
+define i32 @neg_ne_latch_swapped_order(ptr %array, i32 %length) {
; CHECK-LABEL: @neg_ne_latch_swapped_order(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; Negative test, make sure we don't crash on unconditional latches
; TODO: there's no reason we shouldn't be able to predicate the
; condition for an statically infinite loop.
-define i32 @unconditional_latch(i32* %a, i32 %length) {
+define i32 @unconditional_latch(ptr %a, i32 %length) {
; CHECK-LABEL: @unconditional_latch(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 400, [[LOOP_PREHEADER:%.*]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
-; CHECK-NEXT: store volatile i32 0, i32* [[A:%.*]], align 4
+; CHECK-NEXT: store volatile i32 0, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: br label [[LOOP]]
;
%i = phi i32 [ %i.next, %loop ], [ 400, %loop.preheader ]
%within.bounds = icmp ult i32 %i, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
- store volatile i32 0, i32* %a
+ store volatile i32 0, ptr %a
%i.next = add i32 %i, 1
br label %loop
}
declare void @llvm.experimental.guard(i1, ...)
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ule_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ule i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ule i32 %i.next, %n
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ugt_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ugt_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %length.ptr, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check_length_range_known(ptr %array, ptr %length.ptr, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check_length_range_known(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG2:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG2:![0-9]+]]
; CHECK-NEXT: br i1 [[TMP5]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = icmp sle i32 [[N]], [[LENGTH]]
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
;
entry:
%tmp5 = icmp sle i32 %n, 0
- %length = load i32, i32* %length.ptr, !range !1
+ %length = load i32, ptr %length.ptr, !range !1
br i1 %tmp5, label %exit, label %loop.preheader
loop.preheader: ; preds = %entry
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_inverse_latch_predicate(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_inverse_latch_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp sgt i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp sle i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%continue = icmp slt i32 %i, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !prof [[PROF1]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp slt i32 %i, %n
br i1 %continue, label %loop, label %exit, !prof !2
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%continue = icmp sle i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[I_NEXT_OFFSET:%.*]] = add i32 [[I_NEXT]], 1
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%i.next.offset = add i32 %i.next, 1
ret i32 %result
}
-define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n) {
+define i32 @unsupported_latch_pred_loop_0_to_n(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsupported_latch_pred_loop_0_to_n(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ne i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp ne i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_unsupported_iv_step(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_unsupported_iv_step(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 2
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 2
%continue = icmp slt i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_equal_iv_range_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_equal_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 1
%i.next = add nsw i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start.i, i32 %start.j, i32 %length, i32 %n) {
+define i32 @signed_loop_start_to_n_offset_iv_range_check(ptr %array, i32 %start.i, i32 %start.j, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_offset_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i32 %j, 1
%i.next = add i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_types(ptr %array, i16 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_types(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i16 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i16 %j, 1
%i.next = add i32 %i, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_strides(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_strides(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 2
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 2
%i.next = add nsw i32 %i, 1
ret i32 %result
}
-define i32 @two_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32 %n) {
+define i32 @two_range_checks(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, i32 %n) {
; CHECK-LABEL: @two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.1, %array.2.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @three_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32* %array.3, i32 %length.3, i32 %n) {
+define i32 @three_range_checks(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, ptr %array.3, i32 %length.3, i32 %n) {
; CHECK-LABEL: @three_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @three_guards(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32* %array.3, i32 %length.3, i32 %n) {
+define i32 @three_guards(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, ptr %array.3, i32 %length.3, i32 %n) {
; CHECK-LABEL: @three_guards(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_1]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_2:%.*]] = icmp ult i32 [[I]], [[LENGTH_2]]
; CHECK-NEXT: [[WIDENABLE_COND4:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTCALL3]]
; CHECK: guarded1:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_2]])
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_3:%.*]] = icmp ult i32 [[I]], [[LENGTH_3]]
; CHECK-NEXT: [[WIDENABLE_COND9:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTCALL8]]
; CHECK: guarded6:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_3]])
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
%within.bounds.2 = icmp ult i32 %i, %length.2
%widenable_cond4 = call i1 @llvm.experimental.widenable.condition()
ret i32 %deoptcall3
guarded1: ; preds = %guarded
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
%within.bounds.3 = icmp ult i32 %i, %length.3
%widenable_cond9 = call i1 @llvm.experimental.widenable.condition()
ret i32 %deoptcall8
guarded6: ; preds = %guarded1
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @unsigned_loop_0_to_n_unrelated_condition(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @unsigned_loop_0_to_n_unrelated_condition(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
+define i32 @test_no_widened_conditions(ptr %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: @test_no_widened_conditions(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_loop_variant_bound(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_loop_variant_bound(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
ret i32 %result
}
-define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_non_monotonic_predicate(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_non_monotonic_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32 %n) {
+define i32 @unsigned_loop_0_to_n_hoist_length(ptr %array, i16 %length.i16, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32 %divider, i32 %n) {
+define i32 @unsigned_loop_0_to_n_cant_hoist_length(ptr %array, i32 %length, i32 %divider, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_cant_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
; Make sure that if we're going to consider a branch widenable, that the
; call to widenable condition is actually present.
-define i32 @negative_WC_required(i32* %array, i32 %length, i32 %n, i1 %unrelated) {
+define i32 @negative_WC_required(ptr %array, i32 %length, i32 %n, i1 %unrelated) {
; CHECK-LABEL: @negative_WC_required(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !prof [[PROF1]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- store i32 0, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ store i32 0, ptr %array.i.ptr, align 4
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
br i1 %continue, label %loop, label %exit, !prof !2
ret i32 0
}
-define i32 @swapped_wb(i32* %array, i32 %length, i32 %n) {
+define i32 @swapped_wb(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @swapped_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@UNKNOWN = external global i1
-define i32 @neg_length_variant(i32* %array, i32* %length, i32 %n) {
+define i32 @neg_length_variant(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @neg_length_variant(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* %length, align 4
+ %len = load i32, ptr %length, align 4
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @invariant_load_guard_limit(i32* %array, i32* %length, i32 %n) {
+define i32 @invariant_load_guard_limit(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @invariant_load_guard_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
; Case where we have an invariant load, but it's not loading from a loop
; invariant location.
-define i32 @neg_varying_invariant_load_op(i32* %array, i32* %lengths, i32 %n) {
+define i32 @neg_varying_invariant_load_op(ptr %array, ptr %lengths, i32 %n) {
; CHECK-LABEL: @neg_varying_invariant_load_op(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LENGTH_ADDR:%.*]] = getelementptr i32, i32* [[LENGTHS:%.*]], i32 [[I]]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH_ADDR]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LENGTH_ADDR:%.*]] = getelementptr i32, ptr [[LENGTHS:%.*]], i32 [[I]]
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH_ADDR]], align 4, !invariant.load !0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %length.addr = getelementptr i32, i32* %lengths, i32 %i
- %len = load i32, i32* %length.addr, align 4, !invariant.load !{}
+ %length.addr = getelementptr i32, ptr %lengths, i32 %i
+ %len = load i32, ptr %length.addr, align 4, !invariant.load !{}
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
; This is a case where moving the load which provides the limit for the latch
; would be invalid, so we can't perform the tempting transform. Loading the
; latch limit may fault since we could always fail the guard.
-define i32 @neg_invariant_load_latch_limit(i32* %array, i32* %length, i32 %n) {
+define i32 @neg_invariant_load_latch_limit(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @neg_invariant_load_latch_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[N]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LEN]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, %n
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%continue = icmp ult i32 %i.next, %len
br i1 %continue, label %loop, label %exit
ret i32 %result
}
-define i32 @invariant_load_latch_limit(i32* %array,
+define i32 @invariant_load_latch_limit(ptr %array,
; CHECK-LABEL: @invariant_load_latch_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[N]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LEN]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* dereferenceable(4) %length,
+ ptr dereferenceable(4) %length,
i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, %n
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%continue = icmp ult i32 %i.next, %len
br i1 %continue, label %loop, label %exit
@Length = external constant i32
-define i32 @constant_memory(i32* %array, i32 %n) {
+define i32 @constant_memory(ptr %array, i32 %n) {
; CHECK-LABEL: @constant_memory(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* @Length, align 4
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr @Length, align 4
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* @Length, align 4
+ %len = load i32, ptr @Length, align 4
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
ret i32 %result
}
-define i32 @constant_length(i32* %array, i32 %n) {
+define i32 @constant_length(ptr %array, i32 %n) {
; CHECK-LABEL: @constant_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], 20
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, 20
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
declare void @llvm.experimental.guard(i1, ...)
-define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_inner_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_outer_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.i
%j.next = add nsw i32 %j, 1
ret i32 %result
}
-define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_i_to_l_inner_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP5]])
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
ret i32 %result
}
-define i32 @cant_expand_guard_check_start(i32* %array, i32 %length, i32 %n, i32 %l, i32 %maybezero) {
+define i32 @cant_expand_guard_check_start(ptr %array, i32 %length, i32 %n, i32 %l, i32 %maybezero) {
; CHECK-LABEL: @cant_expand_guard_check_start(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
declare void @prevent_merging()
; Base case - with side effects in loop
-define i32 @test1(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test1(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
-define i32 @test_non_canonical(i32* %array, i32 %length, i1 %cond_0) {
+define i32 @test_non_canonical(ptr %array, i32 %length, i1 %cond_0) {
; CHECK-LABEL: @test_non_canonical(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LENGTH]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %length
}
-define i32 @test_two_range_checks(i32* %array, i32 %length.1, i32 %length.2, i32 %n, i1 %cond_0) {
+define i32 @test_two_range_checks(ptr %array, i32 %length.1, i32 %length.2, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@G = external global i32
-define i32 @test_unanalyzeable_exit(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_exit(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[GUARDED2:%.*]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[GUARDED2]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: call void @unknown()
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[GUARDED2]], label [[DEOPT3:%.*]], !prof !0
; CHECK: deopt3:
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N:%.*]]
%loop.acc = phi i32 [ %loop.acc.next, %guarded2 ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %guarded2 ], [ 0, %loop.preheader ]
call void @unknown()
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %guarded2, label %deopt3, !prof !0
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @test_unanalyzeable_exit2(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_exit2(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_exit2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[DEOPTRET2:%.*]] = call i32 (...) @llvm.experimental.deoptimize.i32() [ "deopt"() ]
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[GUARDED2]], label [[DEOPT3:%.*]], !prof !0
; CHECK: deopt3:
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
ret i32 %deoptret2
guarded:
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %guarded2, label %deopt3, !prof !0
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
}
-define i32 @test_unanalyzeable_latch(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_latch(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %loop, label %exit
}
-define i32 @provably_taken(i32* %array, i1 %cond_0) {
+define i32 @provably_taken(ptr %array, i1 %cond_0) {
; CHECK-LABEL: @provably_taken(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], 200
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, 200
ret i32 %result
}
-define i32 @provably_not_taken(i32* %array, i1 %cond_0) {
+define i32 @provably_not_taken(ptr %array, i1 %cond_0) {
; CHECK-LABEL: @provably_not_taken(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], 200
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, 200
;; Unswitch likes to produce some ugly exit blocks without simplifications
;; being applied. Make sure we can handle that form.
-define i32 @unswitch_exit_form(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @unswitch_exit_form(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @unswitch_exit_form(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: br i1 true, label [[GUARDED]], label [[DEOPT_LOOPEXIT]], !prof !0
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @swapped_wb(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @swapped_wb(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @swapped_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
ret i32 %result
}
-define i32 @trivial_wb(i32* %array, i32 %length, i32 %n) {
+define i32 @trivial_wb(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @trivial_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
; TODO: Non-latch exits can still be predicated
; This is currently prevented by an overly restrictive profitability check.
-define i32 @todo_unconditional_latch(i32* %array, i32 %length, i1 %cond_0) {
+define i32 @todo_unconditional_latch(ptr %array, i32 %length, i1 %cond_0) {
; CHECK-LABEL: @todo_unconditional_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: br label [[LOOP]]
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
br label %loop
; If we have a stray widenable branch in the loop, we should still be able to
; run. This can happen when unswitching's cost model avoids unswitching some
; branches.
-define i32 @wb_in_loop(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @wb_in_loop(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @wb_in_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
; CHECK-NEXT: Running pass: LoopSimplifyCFGPass on loop
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
; check.
; LatchExitProbability: 0x04000000 / 0x80000000 = 3.12%
; ExitingBlockProbability: 0x7ffa572a / 0x80000000 = 99.98%
-define i64 @donot_predicate(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @donot_predicate(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @donot_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[COUNTED_SPECULATION_FAILED:%.*]] = call i64 (...) @llvm.experimental.deoptimize.i64(i64 30) [ "deopt"(i32 0) ]
; CHECK-NEXT: ret i64 [[COUNTED_SPECULATION_FAILED]]
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
ret i64 %counted_speculation_failed
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
!0 = !{!"branch_weights", i32 18, i32 104200}
; predicate loop since there's no profile information and BPI concluded all
; exiting blocks have same probability of exiting from loop.
-define i64 @predicate(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @predicate(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i64 1048576, [[LENGTH_EXT]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 0, [[LENGTH_EXT]]
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]]
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK: exitLatch:
; CHECK-NEXT: ret i64 1
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
ret i64 1
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
; the loop is the header exiting block (not the latch block). So do not predicate.
; LatchExitProbability: 0x000020e1 / 0x80000000 = 0.00%
; ExitingBlockProbability: 0x7ffcbb86 / 0x80000000 = 99.99%
-define i64 @donot_predicate_prof(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @donot_predicate_prof(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @donot_predicate_prof(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK: exitLatch:
; CHECK-NEXT: ret i64 1
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
ret i64 1
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
declare i64 @llvm.experimental.deoptimize.i64(...)
declare void @llvm.experimental.guard(i1, ...)
-define i32 @signed_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @signed_reverse_loop_n_to_lower_limit(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @signed_reverse_loop_n_to_lower_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp sgt i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
ret i32 %result
}
-define i32 @unsigned_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_lower_limit(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_lower_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
; if we predicated the loop, the guard will definitely fail and we will
; deoptimize early on.
-define i32 @unsigned_reverse_loop_n_to_0(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_0(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_0(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], 0
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, 0
br i1 %continue, label %loop, label %exit
}
; do not loop predicate when the range has step -1 and latch has step 1.
-define i32 @reverse_loop_range_step_increment(i32 %n, i32* %array, i32 %length) {
+define i32 @reverse_loop_range_step_increment(i32 %n, ptr %array, i32 %length) {
; CHECK-LABEL: @reverse_loop_range_step_increment(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[IRC]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[IRC]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], -1
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], 65534
%within.bounds = icmp ult i32 %irc, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %irc to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%i.next = add nsw i32 %i, -1
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, 65534
ret i32 %result
}
-define i32 @signed_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @signed_reverse_loop_n_to_lower_limit_equal(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @signed_reverse_loop_n_to_lower_limit_equal(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sge i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp sge i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
ret i32 %result
}
-define i32 @unsigned_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_lower_limit_equal(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_lower_limit_equal(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp uge i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp uge i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
; If we predicated the loop, the guard would definitely fail and we would
; deoptimize early on.
-define i32 @unsigned_reverse_loop_n_to_1(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_1(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp uge i32 [[I]], 1
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp uge i32 %i, 1
br i1 %continue, label %loop, label %exit
;; Error checking is rather silly here -- the test should simply compile
;; successfully; in the bad case it would just time out.
;;
-define i32 @unswitch_exit_form_with_endless_loop(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @unswitch_exit_form_with_endless_loop(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @unswitch_exit_form_with_endless_loop
entry:
%widenable_cond = call i1 @llvm.experimental.widenable.condition()
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
declare void @llvm.experimental.guard(i1, ...)
-define i32 @test_visited(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @test_visited(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @test_visited(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND_99]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond.99, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
; RUN: opt -S -passes='require<scalar-evolution>,loop-mssa(loop-predication)' -verify-memoryssa < %s 2>&1 | FileCheck %s
declare void @llvm.experimental.guard(i1, ...)
-declare i32 @length(i8*)
+declare i32 @length(ptr)
-declare i16 @short_length(i8*)
+declare i16 @short_length(ptr)
; Consider range check of type i16 and i32, while IV is of type i64
; We can loop predicate this because the IV range is within i16 and within i32.
-define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, i8* %arrA, i8* %arrB) {
+define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, ptr %arrA, ptr %arrB) {
; CHECK-LABEL: @iv_wider_type_rc_two_narrow_types(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
-; CHECK-NEXT: [[LENGTHB:%.*]] = call i16 @short_length(i8* [[ARRB:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHB:%.*]] = call i16 @short_length(ptr [[ARRB:%.*]])
; CHECK-NEXT: [[TMP0:%.*]] = sub i16 [[LENGTHB]], [[OFFB:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i16 16, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i16 [[OFFB]], [[LENGTHB]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP8]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WIDE_CHK]])
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[INDEXB_EXT:%.*]] = zext i16 [[INDEXB]] to i64
-; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
-; CHECK-NEXT: store i8 [[ELTA]], i8* [[ADDRB]], align 1
+; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, ptr [[ARRB]], i64 [[INDEXB_EXT]]
+; CHECK-NEXT: store i8 [[ELTA]], ptr [[ADDRB]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV_NEXT]], 16
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK-NEXT: ret i64 [[IV_LCSSA]]
;
entry:
- %lengthA = call i32 @length(i8* %arrA)
- %lengthB = call i16 @short_length(i8* %arrB)
+ %lengthA = call i32 @length(ptr %arrA)
+ %lengthB = call i16 @short_length(ptr %arrB)
br label %loop
loop:
%wide.chk = and i1 %rcA, %rcB
call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%indexB.ext = zext i16 %indexB to i64
- %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
- store i8 %eltA, i8* %addrB
+ %addrB = getelementptr inbounds i8, ptr %arrB, i64 %indexB.ext
+ store i8 %eltA, ptr %addrB
%iv.next = add nuw nsw i64 %iv, 1
%latch.check = icmp ult i64 %iv.next, 16
br i1 %latch.check, label %loop, label %exit
; Consider an IV of type long and an array access into int array.
; IV is of type i64 while the range check operands are of type i32 and i64.
-define i64 @iv_rc_different_types(i32 %offA, i32 %offB, i8* %arrA, i8* %arrB, i64 %max)
+define i64 @iv_rc_different_types(i32 %offA, i32 %offB, ptr %arrA, ptr %arrB, i64 %max)
; CHECK-LABEL: @iv_rc_different_types(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
-; CHECK-NEXT: [[LENGTHB:%.*]] = call i32 @length(i8* [[ARRB:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHB:%.*]] = call i32 @length(ptr [[ARRB:%.*]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTHB]], -1
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[OFFB:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ule i32 15, [[TMP1]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP15]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WIDE_CHK_FINAL]])
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[INDEXB_EXT:%.*]] = zext i32 [[INDEXB]] to i64
-; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
-; CHECK-NEXT: [[ELTB:%.*]] = load i8, i8* [[ADDRB]], align 1
+; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, ptr [[ARRB]], i64 [[INDEXB_EXT]]
+; CHECK-NEXT: [[ELTB:%.*]] = load i8, ptr [[ADDRB]], align 1
; CHECK-NEXT: [[RESULT:%.*]] = xor i8 [[ELTA]], [[ELTB]]
-; CHECK-NEXT: store i8 [[RESULT]], i8* [[ADDRA]], align 1
+; CHECK-NEXT: store i8 [[RESULT]], ptr [[ADDRA]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV]], 15
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
;
{
entry:
- %lengthA = call i32 @length(i8* %arrA)
- %lengthB = call i32 @length(i8* %arrB)
+ %lengthA = call i32 @length(ptr %arrA)
+ %lengthB = call i32 @length(ptr %arrB)
br label %loop
loop:
%wide.chk.final = and i1 %wide.chk, %rcB
call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk.final, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%indexB.ext = zext i32 %indexB to i64
- %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
- %eltB = load i8, i8* %addrB
+ %addrB = getelementptr inbounds i8, ptr %arrB, i64 %indexB.ext
+ %eltB = load i8, ptr %addrB
%result = xor i8 %eltA, %eltB
- store i8 %result, i8* %addrA
+ store i8 %result, ptr %addrA
%iv.next = add nuw nsw i64 %iv, 1
%latch.check = icmp ult i64 %iv, 15
br i1 %latch.check, label %loop, label %exit
; Cannot narrow the IV to the range-check type, because we would lose information:
;   for (i64 i = 5; i >= 2; i++)
; this loop wraps around after reaching 2^64.
-define i64 @iv_rc_different_type(i32 %offA, i8* %arrA) {
+define i64 @iv_rc_different_type(i32 %offA, ptr %arrA) {
; CHECK-LABEL: @iv_rc_different_type(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 5, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[RCA:%.*]] = icmp ult i32 [[INDEXA]], [[LENGTHA]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[RCA]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[RES:%.*]] = add i8 [[ELTA]], 2
-; CHECK-NEXT: store i8 [[ELTA]], i8* [[ADDRA]], align 1
+; CHECK-NEXT: store i8 [[ELTA]], ptr [[ADDRA]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp sge i64 [[IV_NEXT]], 2
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK-NEXT: ret i64 [[IV_LCSSA]]
;
entry:
- %lengthA = call i32 @length(i8* %arrA)
+ %lengthA = call i32 @length(ptr %arrA)
br label %loop
loop:
%rcA = icmp ult i32 %indexA, %lengthA
call void (i1, ...) @llvm.experimental.guard(i1 %rcA, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%res = add i8 %eltA, 2
- store i8 %eltA, i8* %addrA
+ store i8 %eltA, ptr %addrA
%iv.next = add i64 %iv, 1
%latch.check = icmp sge i64 %iv.next, 2
br i1 %latch.check, label %loop, label %exit