; PR 1334
; RUN: opt < %s -passes=loop-unroll -disable-output
-define void @sal__math_float_manipulator_7__math__joint_array_dcv_ops__Omultiply__3([6 x float]* %agg.result) {
+define void @sal__math_float_manipulator_7__math__joint_array_dcv_ops__Omultiply__3(ptr %agg.result) {
entry:
%tmp282911 = zext i8 0 to i32 ; <i32> [#uses=1]
br label %cond_next
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "i686-apple-darwin8"
; GMP integer struct; the pointer field is opaque (ptr) after the
; opaque-pointer migration.
%struct.__mpz_struct = type { i32, i32, ptr }
; Loop-unroll reproducer calling GMP routines on a stack-allocated mpz.
; The loop body runs exactly twice (trip count fixed by `icmp ne %indvar.next, 2`).
define void @Foo(ptr %base) {
entry:
  %want = alloca [1 x %struct.__mpz_struct], align 16             ; <ptr> [#uses=4]
  %want1 = getelementptr [1 x %struct.__mpz_struct], ptr %want, i32 0, i32 0 ; <ptr> [#uses=1]
  call void @__gmpz_init( ptr %want1 )
  %want27 = getelementptr [1 x %struct.__mpz_struct], ptr %want, i32 0, i32 0 ; <ptr> [#uses=1]
  %want3 = getelementptr [1 x %struct.__mpz_struct], ptr %want, i32 0, i32 0 ; <ptr> [#uses=1]
  %want2 = getelementptr [1 x %struct.__mpz_struct], ptr %want, i32 0, i32 0 ; <ptr> [#uses=2]
  br label %bb
bb:             ; preds = %bb, %entry
  %i.01.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]          ; <i32> [#uses=1]
  %want23.0 = phi ptr [ %want27, %entry ], [ %want2, %bb ]        ; <ptr> [#uses=1]
  call void @__gmpz_mul( ptr %want23.0, ptr %want3, ptr %base )
  %indvar.next = add i32 %i.01.0, 1                               ; <i32> [#uses=2]
  %exitcond = icmp ne i32 %indvar.next, 2                         ; <i1> [#uses=1]
  br i1 %exitcond, label %bb, label %bb10
bb10:           ; preds = %bb
  %want2.lcssa = phi ptr [ %want2, %bb ]                          ; <ptr> [#uses=1]
  call void @__gmpz_clear( ptr %want2.lcssa )
  ret void
}
; External GMP entry points (opaque-pointer signatures).
declare void @__gmpz_init(ptr)

declare void @__gmpz_mul(ptr, ptr, ptr)

declare void @__gmpz_clear(ptr)
; PR1770
; PR1947
; ClamAV matcher data structures (pointer members are opaque `ptr`).
%struct.cl_engine = type { i32, i16, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
%struct.cl_limits = type { i32, i32, i32, i32, i16, i64 }
%struct.cli_ac_alt = type { i8, ptr, i16, i16, ptr }
%struct.cli_ac_node = type { i8, i8, ptr, ptr, ptr }
%struct.cli_ac_patt = type { ptr, ptr, i16, i16, i8, i32, i32, ptr, ptr, i32, i16, i16, i16, i16, ptr, i8, i16, ptr, ptr }
%struct.cli_bm_patt = type { ptr, i32, ptr, ptr, i8, ptr }
%struct.cli_ctx = type { ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i32, ptr }
%struct.cli_dconf = type { i32, i32, i32, i32, i32, i32, i32 }
%struct.cli_matcher = type { i16, i8, ptr, ptr, ptr, i32, i8, i8, ptr, ptr, ptr, i32, i32, i32 }
declare ptr @calloc(i64, i64)
-define fastcc i32 @cli_scanpe(i32 %desc, %struct.cli_ctx* %ctx) {
+define fastcc i32 @cli_scanpe(i32 %desc, ptr %ctx) {
entry:
br i1 false, label %cond_next17, label %cond_true14
ret i32 0
cond_next54.i: ; preds = %bb36.i
- %tmp10.i.i527 = call i8* @calloc( i64 0, i64 1 ) ; <i8*> [#uses=1]
+ %tmp10.i.i527 = call ptr @calloc( i64 0, i64 1 ) ; <ptr> [#uses=1]
br i1 false, label %cond_next11.i.i, label %bb132.i
bb132.i: ; preds = %cond_next54.i
ret i32 0
cond_true1008.critedge1190.i: ; preds = %cond_true784.i
- %tmp621.i532.lcssa610 = phi i8* [ %tmp10.i.i527, %cond_true784.i ] ; <i8*> [#uses=0]
+ %tmp621.i532.lcssa610 = phi ptr [ %tmp10.i.i527, %cond_true784.i ] ; <ptr> [#uses=0]
ret i32 0
bb9065: ; preds = %cond_next8154
; PR7318: assertion failure after doing a simple loop unroll
;
-define i32 @test2(i32* nocapture %p, i32 %n) nounwind readonly {
+define i32 @test2(ptr nocapture %p, i32 %n) nounwind readonly {
;
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK: bb:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[INDVAR_NEXT_3:%.*]], [[BB1_3:%.*]] ]
; CHECK-NEXT: [[S_01:%.*]] = phi i32 [ 0, [[BB_NPH]] ], [ [[TMP8:%.*]], [[BB1_3]] ]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDVAR]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SCEVGEP]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP1]], [[S_01]]
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVAR_NEXT]], [[TMP]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[BB_1:%.*]], label [[BB1_BB2_CRIT_EDGE:%.*]]
; CHECK: bb.1:
; CHECK-NEXT: [[SCEVGEP_1:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDVAR_NEXT]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[SCEVGEP_1]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: br label [[BB1_1:%.*]]
; CHECK: bb1.1:
; CHECK-NEXT: [[EXITCOND_1:%.*]] = icmp ne i64 [[INDVAR_NEXT_1]], [[TMP]]
; CHECK-NEXT: br i1 [[EXITCOND_1]], label [[BB_2:%.*]], label [[BB1_BB2_CRIT_EDGE]]
; CHECK: bb.2:
; CHECK-NEXT: [[SCEVGEP_2:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDVAR_NEXT_1]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[SCEVGEP_2]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT: br label [[BB1_2:%.*]]
; CHECK: bb1.2:
; CHECK-NEXT: [[EXITCOND_2:%.*]] = icmp ne i64 [[INDVAR_NEXT_2]], [[TMP]]
; CHECK-NEXT: br i1 [[EXITCOND_2]], label [[BB_3:%.*]], label [[BB1_BB2_CRIT_EDGE]]
; CHECK: bb.3:
; CHECK-NEXT: [[SCEVGEP_3:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDVAR_NEXT_2]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[SCEVGEP_3]], align 1
; CHECK-NEXT: [[TMP8]] = add nsw i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT: br label [[BB1_3]]
; CHECK: bb1.3:
bb: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb1 ] ; <i64> [#uses=2]
%s.01 = phi i32 [ 0, %bb.nph ], [ %2, %bb1 ] ; <i32> [#uses=1]
- %scevgep = getelementptr i32, i32* %p, i64 %indvar ; <i32*> [#uses=1]
- %1 = load i32, i32* %scevgep, align 1 ; <i32> [#uses=1]
+ %scevgep = getelementptr i32, ptr %p, i64 %indvar ; <ptr> [#uses=1]
+ %1 = load i32, ptr %scevgep, align 1 ; <i32> [#uses=1]
%2 = add nsw i32 %1, %s.01 ; <i32> [#uses=2]
br label %bb1
; CHECK-NEXT: [[COND2:%.*]] = call zeroext i1 @check()
; CHECK-NEXT: br i1 [[COND2]], label [[EXIT:%.*]], label [[DO_COND:%.*]]
; CHECK: exit:
; CHECK-NEXT: [[TMP7_I:%.*]] = load i32, ptr undef, align 8
; CHECK-NEXT: br i1 undef, label [[DO_COND]], label [[LAND_LHS_TRUE:%.*]]
; CHECK: land.lhs.true:
; CHECK-NEXT: br i1 true, label [[RETURN_LOOPEXIT:%.*]], label [[DO_COND]]
; CHECK-NEXT: [[COND2_1:%.*]] = call zeroext i1 @check()
; CHECK-NEXT: br i1 [[COND2_1]], label [[EXIT_1:%.*]], label [[DO_COND_1:%.*]]
; CHECK: exit.1:
; CHECK-NEXT: [[TMP7_I_1:%.*]] = load i32, ptr undef, align 8
; CHECK-NEXT: br i1 undef, label [[DO_COND_1]], label [[LAND_LHS_TRUE_1:%.*]]
; CHECK: land.lhs.true.1:
; CHECK-NEXT: br i1 true, label [[RETURN_LOOPEXIT]], label [[DO_COND_1]]
; CHECK-NEXT: [[COND2_2:%.*]] = call zeroext i1 @check()
; CHECK-NEXT: br i1 [[COND2_2]], label [[EXIT_2:%.*]], label [[DO_COND_2:%.*]]
; CHECK: exit.2:
; CHECK-NEXT: [[TMP7_I_2:%.*]] = load i32, ptr undef, align 8
; CHECK-NEXT: br i1 undef, label [[DO_COND_2]], label [[LAND_LHS_TRUE_2:%.*]]
; CHECK: land.lhs.true.2:
; CHECK-NEXT: br i1 true, label [[RETURN_LOOPEXIT]], label [[DO_COND_2]]
; CHECK-NEXT: [[COND2_3:%.*]] = call zeroext i1 @check()
; CHECK-NEXT: br i1 [[COND2_3]], label [[EXIT_3:%.*]], label [[DO_COND_3:%.*]]
; CHECK: exit.3:
; CHECK-NEXT: [[TMP7_I_3:%.*]] = load i32, ptr undef, align 8
; CHECK-NEXT: br i1 undef, label [[DO_COND_3]], label [[LAND_LHS_TRUE_3:%.*]]
; CHECK: land.lhs.true.3:
; CHECK-NEXT: br i1 true, label [[RETURN_LOOPEXIT]], label [[DO_COND_3]]
br i1 %cond2, label %exit, label %do.cond
exit: ; preds = %do.body
- %tmp7.i = load i32, i32* undef, align 8
+ %tmp7.i = load i32, ptr undef, align 8
br i1 undef, label %do.cond, label %land.lhs.true
land.lhs.true: ; preds = %exit
; CHECK: while.body:
; CHECK-NOT: while.body.1:
; CHECK: %shr.1 = lshr i32 %bit_addr.addr.01, 5
; CHECK: %arrayidx.1 = getelementptr inbounds i32, ptr %bitmap, i32 %shr.1
; CHECK: %shr.2 = lshr i32 %bit_addr.addr.01, 5
; CHECK: %arrayidx.2 = getelementptr inbounds i32, ptr %bitmap, i32 %shr.2
; CHECK: %shr.3 = lshr i32 %bit_addr.addr.01, 5
-; CHECK: %arrayidx.3 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.3
-define void @FlipBit(i32* nocapture %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
+; CHECK: %arrayidx.3 = getelementptr inbounds i32, ptr %bitmap, i32 %shr.3
+define void @FlipBit(ptr nocapture %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
entry:
br label %while.body
%shr = lshr i32 %bit_addr.addr.01, 5
%rem = and i32 %bit_addr.addr.01, 31
%shl = shl i32 1, %rem
- %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
- %tmp6 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %bitmap, i32 %shr
+ %tmp6 = load i32, ptr %arrayidx, align 4
%xor = xor i32 %tmp6, %shl
- store i32 %xor, i32* %arrayidx, align 4
+ store i32 %xor, ptr %arrayidx, align 4
%inc = add i32 %bit_addr.addr.01, 1
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %while.end, label %while.body
; CHECK: %add.1 = add nsw i32 %conv.1, %conv
; CHECK: %add.18 = add nsw i32 %conv.18, %add.17
; CHECK: ret i32 %add.18
-define i32 @test(i8* %arr) nounwind uwtable readnone {
+define i32 @test(ptr %arr) nounwind uwtable readnone {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %arr, i64 %indvars.iv
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %arr, i64 %indvars.iv
+ %0 = load i8, ptr %arrayidx, align 1
%conv = sext i8 %0 to i32
%add = add nsw i32 %conv, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
declare void @subtract() nounwind uwtable
; CHECK-NOT: unreachable
; indirectbr/blockaddress reproducer (body partially elided by the chunking;
; %joinphi15.in.in and the %call2_termjoin/%for.body_codeprime blocks are
; defined in the elided portion — TODO confirm against the full test file).
define i32 @main(i32 %argc, ptr nocapture %argv) nounwind uwtable {
entry:
  %vals19 = alloca [5 x i32], align 16
  %x20 = alloca i32, align 4
  store i32 135, ptr %x20, align 4
  br label %for.body
for.body:                                         ; preds = %call2_termjoin, %call3_termjoin
  %indvars.iv = phi i64 [ 0, %entry ], [ %joinphi15.in.in, %call2_termjoin ]
  %a6 = call coldcc ptr @funca(ptr blockaddress(@main, %for.body_code), ptr blockaddress(@main, %for.body_codeprime)) nounwind
  indirectbr ptr %a6, [label %for.body_code, label %for.body_codeprime]
for.body_code:                                    ; preds = %for.body
  call void @subtract()
  br label %call2_termjoin
}
declare coldcc ptr @funca(ptr, ptr) readonly

declare void @subtract_v2(i64) nounwind uwtable
; CHECK-NEXT: icmp
; CHECK-NEXT: br
; CHECK-NEXT-LABEL: exit:
-define void @unroll1(i32* %p, i32* %p2) {
+define void @unroll1(ptr %p, ptr %p2) {
entry:
br label %loop
loop:
%iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
- %gep = getelementptr inbounds i32, i32* %p, i32 %iv
- %load = load volatile i32, i32* %gep
+ %gep = getelementptr inbounds i32, ptr %p, i32 %iv
+ %load = load volatile i32, ptr %gep
- %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
- %load2 = load volatile i32, i32* %gep2
+ %gep2 = getelementptr inbounds i32, ptr %p2, i32 %iv
+ %load2 = load volatile i32, ptr %gep2
%inc = add i32 %iv, 1
%exitcnd = icmp uge i32 %inc, 1024
; CHECK-NEXT: br
; CHECK-NEXT-LABEL: exit2:
-define void @unroll2(i32* %p) {
+define void @unroll2(ptr %p) {
entry:
br label %loop1
loop2:
%iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
%sum = phi i32 [ %outer.sum, %loop2.header ], [ %sum.inc, %loop2 ]
- %gep = getelementptr inbounds i32, i32* %p, i32 %iv2
- %load = load i32, i32* %gep
+ %gep = getelementptr inbounds i32, ptr %p, i32 %iv2
+ %load = load i32, ptr %gep
%sum.inc = add i32 %sum, %load
%inc2 = add i32 %iv2, 1
%exitcnd2 = icmp uge i32 %inc2, 1024
; the output.
; UNROLL-LABEL: @test
; UNROLL: load i32, ptr
; UNROLL: load i32, ptr
; UNROLL: load i32, ptr
; UNROLL-NOT: load i32, ptr
; NOUNROLL-LABEL: @test
; NOUNROLL: load i32, ptr
; NOUNROLL-NOT: load i32, ptr
; Unroll test: scan up to 4 elements of %src, store the final index to %dst.
; NOTE(review): %or.cond is defined in a line elided by the chunking
; (presumably `and i1 %cmp1, %cmp3`) — confirm against the full test file.
define void @test(ptr %dst, ptr %src) {
entry:
  br label %for.body
for.body:                                         ; preds = %entry, %for.body
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %0 = sext i32 %i to i64
  %1 = getelementptr inbounds i32, ptr %src, i64 %0
  %2 = load i32, ptr %1
  %inc = add nsw i32 %i, 1
  %cmp1 = icmp slt i32 %inc, 4
  %cmp3 = icmp eq i32 %2, 1
  br i1 %or.cond, label %for.body, label %exit
exit:                                             ; preds = %for.body
  store i32 %i, ptr %dst
  ret void
}
; The loop in the function only contains a few instructions, but they will get
; lowered to a very large amount of target instructions.
-define void @loop_with_large_vector_ops(i32 %i, <225 x double>* %A, <225 x double>* %B) {
+define void @loop_with_large_vector_ops(i32 %i, ptr %A, ptr %B) {
; CHECK-LABEL: @loop_with_large_vector_ops(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A_GEP:%.*]] = getelementptr <225 x double>, ptr [[A:%.*]], i32 [[IV]]
; CHECK-NEXT: [[LV_1:%.*]] = load <225 x double>, ptr [[A_GEP]], align 8
; CHECK-NEXT: [[B_GEP:%.*]] = getelementptr <225 x double>, ptr [[A]], i32 [[IV]]
; CHECK-NEXT: [[LV_2:%.*]] = load <225 x double>, ptr [[B_GEP]], align 8
; CHECK-NEXT: [[MUL:%.*]] = fmul <225 x double> [[LV_1]], [[LV_2]]
; CHECK-NEXT: store <225 x double> [[MUL]], ptr [[B_GEP]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], 10
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %A.gep = getelementptr <225 x double>, <225 x double>* %A, i32 %iv
- %lv.1 = load <225 x double>, <225 x double>* %A.gep, align 8
- %B.gep = getelementptr <225 x double>, <225 x double>* %A, i32 %iv
- %lv.2 = load <225 x double>, <225 x double>* %B.gep, align 8
+ %A.gep = getelementptr <225 x double>, ptr %A, i32 %iv
+ %lv.1 = load <225 x double>, ptr %A.gep, align 8
+ %B.gep = getelementptr <225 x double>, ptr %A, i32 %iv
+ %lv.2 = load <225 x double>, ptr %B.gep, align 8
%mul = fmul <225 x double> %lv.1, %lv.2
- store <225 x double> %mul, <225 x double>* %B.gep, align 8
+ store <225 x double> %mul, ptr %B.gep, align 8
%iv.next = add nuw i32 %iv, 1
%cmp = icmp ult i32 %iv.next, 10
br i1 %cmp, label %loop, label %exit
; PROLOG: for.body.prol:
; PROLOG: for.body:
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
entry:
%cmp1 = icmp eq i32 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; RUN: opt < %s -S -passes="default<O1>" -mtriple aarch64 | FileCheck %s -check-prefix=CHECK-GENERIC
; Testing that, while runtime unrolling is performed on in-order cores (such as the cortex-a55), it is not performed when -mcpu is not specified
-define void @runtime_unroll_generic(i32 %arg_0, i32* %arg_1, i16* %arg_2, i16* %arg_3) {
+define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg_3) {
; CHECK-A55-LABEL: @runtime_unroll_generic(
; CHECK-A55-NEXT: entry:
; CHECK-A55-NEXT: [[CMP52_NOT:%.*]] = icmp eq i32 [[ARG_0:%.*]], 0
; CHECK-A55-NEXT: br i1 [[CMP52_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY6_LR_PH:%.*]]
; CHECK-A55: for.body6.lr.ph:
; CHECK-A55-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[ARG_2:%.*]], i64 undef
; CHECK-A55-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i16, ptr [[ARG_3:%.*]], i64 undef
; CHECK-A55-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, ptr [[ARG_1:%.*]], i64 undef
; CHECK-A55-NEXT: [[XTRAITER:%.*]] = and i32 [[ARG_0]], 3
; CHECK-A55-NEXT: [[TMP0:%.*]] = icmp ult i32 [[ARG_0]], 4
; CHECK-A55-NEXT: br i1 [[TMP0]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY6_LR_PH_NEW:%.*]]
; CHECK-A55-NEXT: br label [[FOR_BODY6:%.*]]
; CHECK-A55: for.body6:
; CHECK-A55-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY6_LR_PH_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_BODY6]] ]
; CHECK-A55-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-A55-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-A55-NEXT: [[MUL16:%.*]] = mul nsw i32 [[CONV15]], [[CONV]]
; CHECK-A55-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[ADD21:%.*]] = add nsw i32 [[MUL16]], [[TMP3]]
; CHECK-A55-NEXT: store i32 [[ADD21]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_1:%.*]] = sext i16 [[TMP4]] to i32
; CHECK-A55-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_1:%.*]] = sext i16 [[TMP5]] to i32
; CHECK-A55-NEXT: [[MUL16_1:%.*]] = mul nsw i32 [[CONV15_1]], [[CONV_1]]
; CHECK-A55-NEXT: [[ADD21_1:%.*]] = add nsw i32 [[MUL16_1]], [[ADD21]]
; CHECK-A55-NEXT: store i32 [[ADD21_1]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_2:%.*]] = sext i16 [[TMP6]] to i32
; CHECK-A55-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_2:%.*]] = sext i16 [[TMP7]] to i32
; CHECK-A55-NEXT: [[MUL16_2:%.*]] = mul nsw i32 [[CONV15_2]], [[CONV_2]]
; CHECK-A55-NEXT: [[ADD21_2:%.*]] = add nsw i32 [[MUL16_2]], [[ADD21_1]]
; CHECK-A55-NEXT: store i32 [[ADD21_2]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_3:%.*]] = sext i16 [[TMP8]] to i32
; CHECK-A55-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_3:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-A55-NEXT: [[MUL16_3:%.*]] = mul nsw i32 [[CONV15_3]], [[CONV_3]]
; CHECK-A55-NEXT: [[ADD21_3:%.*]] = add nsw i32 [[MUL16_3]], [[ADD21_2]]
; CHECK-A55-NEXT: store i32 [[ADD21_3]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER]], 4
; CHECK-A55-NEXT: [[NITER_NCMP_3_NOT:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-A55-NEXT: br i1 [[NITER_NCMP_3_NOT]], label [[FOR_END_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY6]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-A55-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i32 [[XTRAITER]], 0
; CHECK-A55-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL:%.*]]
; CHECK-A55: for.body6.epil:
; CHECK-A55-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL:%.*]] = sext i16 [[TMP10]] to i32
; CHECK-A55-NEXT: [[TMP11:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL:%.*]] = sext i16 [[TMP11]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL:%.*]] = mul nsw i32 [[CONV15_EPIL]], [[CONV_EPIL]]
; CHECK-A55-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL:%.*]] = add nsw i32 [[MUL16_EPIL]], [[TMP12]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[EPIL_ITER_CMP_NOT:%.*]] = icmp eq i32 [[XTRAITER]], 1
; CHECK-A55-NEXT: br i1 [[EPIL_ITER_CMP_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL_1:%.*]]
; CHECK-A55: for.body6.epil.1:
; CHECK-A55-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL_1:%.*]] = sext i16 [[TMP13]] to i32
; CHECK-A55-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL_1:%.*]] = sext i16 [[TMP14]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL_1:%.*]] = mul nsw i32 [[CONV15_EPIL_1]], [[CONV_EPIL_1]]
; CHECK-A55-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL_1:%.*]] = add nsw i32 [[MUL16_EPIL_1]], [[TMP15]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL_1]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[EPIL_ITER_CMP_1_NOT:%.*]] = icmp eq i32 [[XTRAITER]], 2
; CHECK-A55-NEXT: br i1 [[EPIL_ITER_CMP_1_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL_2:%.*]]
; CHECK-A55: for.body6.epil.2:
; CHECK-A55-NEXT: [[TMP16:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-A55-NEXT: [[CONV_EPIL_2:%.*]] = sext i16 [[TMP16]] to i32
; CHECK-A55-NEXT: [[TMP17:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-A55-NEXT: [[CONV15_EPIL_2:%.*]] = sext i16 [[TMP17]] to i32
; CHECK-A55-NEXT: [[MUL16_EPIL_2:%.*]] = mul nsw i32 [[CONV15_EPIL_2]], [[CONV_EPIL_2]]
; CHECK-A55-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: [[ADD21_EPIL_2:%.*]] = add nsw i32 [[MUL16_EPIL_2]], [[TMP18]]
; CHECK-A55-NEXT: store i32 [[ADD21_EPIL_2]], ptr [[ARRAYIDX20]], align 4
; CHECK-A55-NEXT: br label [[FOR_END]]
; CHECK-A55: for.end:
; CHECK-A55-NEXT: ret void
; CHECK-GENERIC-NEXT: [[CMP52_NOT:%.*]] = icmp eq i32 [[ARG_0:%.*]], 0
; CHECK-GENERIC-NEXT: br i1 [[CMP52_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY6_LR_PH:%.*]]
; CHECK-GENERIC: for.body6.lr.ph:
; CHECK-GENERIC-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[ARG_2:%.*]], i64 undef
; CHECK-GENERIC-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i16, ptr [[ARG_3:%.*]], i64 undef
; CHECK-GENERIC-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, ptr [[ARG_1:%.*]], i64 undef
; CHECK-GENERIC-NEXT: br label [[FOR_BODY6:%.*]]
; CHECK-GENERIC: for.body6:
; CHECK-GENERIC-NEXT: [[K_03:%.*]] = phi i32 [ 0, [[FOR_BODY6_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY6]] ]
; CHECK-GENERIC-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
; CHECK-GENERIC-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
; CHECK-GENERIC-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX14]], align 2
; CHECK-GENERIC-NEXT: [[CONV15:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-GENERIC-NEXT: [[MUL16:%.*]] = mul nsw i32 [[CONV15]], [[CONV]]
; CHECK-GENERIC-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-GENERIC-NEXT: [[ADD21:%.*]] = add nsw i32 [[MUL16]], [[TMP2]]
; CHECK-GENERIC-NEXT: store i32 [[ADD21]], ptr [[ARRAYIDX20]], align 4
; CHECK-GENERIC-NEXT: [[INC]] = add nuw i32 [[K_03]], 1
; CHECK-GENERIC-NEXT: [[CMP5:%.*]] = icmp ult i32 [[INC]], [[ARG_0]]
; CHECK-GENERIC-NEXT: br i1 [[CMP5]], label [[FOR_BODY6]], label [[FOR_END]], !llvm.loop [[LOOP0:![0-9]+]]
;
entry:
%arg_0.addr = alloca i32, align 4
- %arg_1.addr = alloca i32*, align 8
- %arg_2.addr = alloca i16*, align 8
- %arg_3.addr = alloca i16*, align 8
+ %arg_1.addr = alloca ptr, align 8
+ %arg_2.addr = alloca ptr, align 8
+ %arg_3.addr = alloca ptr, align 8
%k = alloca i32, align 4
- store i32 %arg_0, i32* %arg_0.addr, align 4
- store i32* %arg_1, i32** %arg_1.addr, align 8
- store i16* %arg_2, i16** %arg_2.addr, align 8
- store i16* %arg_3, i16** %arg_3.addr, align 8
+ store i32 %arg_0, ptr %arg_0.addr, align 4
+ store ptr %arg_1, ptr %arg_1.addr, align 8
+ store ptr %arg_2, ptr %arg_2.addr, align 8
+ store ptr %arg_3, ptr %arg_3.addr, align 8
br label %for.cond
for.cond: ; preds = %entry
br label %for.body3
for.body3: ; preds = %for.cond1
- store i32 0, i32* %k, align 4
+ store i32 0, ptr %k, align 4
br label %for.cond4
for.cond4: ; preds = %for.inc, %for.body3
- %0 = load i32, i32* %k, align 4
- %1 = load i32, i32* %arg_0.addr, align 4
+ %0 = load i32, ptr %k, align 4
+ %1 = load i32, ptr %arg_0.addr, align 4
%cmp5 = icmp ult i32 %0, %1
br i1 %cmp5, label %for.body6, label %for.end
for.body6: ; preds = %for.cond4
- %2 = load i16*, i16** %arg_2.addr, align 8
- %arrayidx10 = getelementptr inbounds i16, i16* %2, i64 undef
- %3 = load i16, i16* %arrayidx10, align 2
+ %2 = load ptr, ptr %arg_2.addr, align 8
+ %arrayidx10 = getelementptr inbounds i16, ptr %2, i64 undef
+ %3 = load i16, ptr %arrayidx10, align 2
%conv = sext i16 %3 to i32
- %4 = load i16*, i16** %arg_3.addr, align 8
- %arrayidx14 = getelementptr inbounds i16, i16* %4, i64 undef
- %5 = load i16, i16* %arrayidx14, align 2
+ %4 = load ptr, ptr %arg_3.addr, align 8
+ %arrayidx14 = getelementptr inbounds i16, ptr %4, i64 undef
+ %5 = load i16, ptr %arrayidx14, align 2
%conv15 = sext i16 %5 to i32
%mul16 = mul nsw i32 %conv, %conv15
- %6 = load i32*, i32** %arg_1.addr, align 8
- %arrayidx20 = getelementptr inbounds i32, i32* %6, i64 undef
- %7 = load i32, i32* %arrayidx20, align 4
+ %6 = load ptr, ptr %arg_1.addr, align 8
+ %arrayidx20 = getelementptr inbounds i32, ptr %6, i64 undef
+ %7 = load i32, ptr %arrayidx20, align 4
%add21 = add nsw i32 %7, %mul16
- store i32 %add21, i32* %arrayidx20, align 4
+ store i32 %add21, ptr %arrayidx20, align 4
br label %for.inc
for.inc: ; preds = %for.body6
- %8 = load i32, i32* %k, align 4
+ %8 = load i32, ptr %k, align 4
%inc = add i32 %8, 1
- store i32 %inc, i32* %k, align 4
+ store i32 %inc, ptr %k, align 4
br label %for.cond4, !llvm.loop !0
for.end: ; preds = %for.cond4
%and4.i = lshr i32 %shl.i, 11
%conv6.i = and i32 %and4.i, 32767
%idxprom.i = zext i8 %conv3.i to i64
- %arrayidx.i7 = getelementptr inbounds [33 x i16], [33 x i16]* @tab_log2, i64 0, i64 %idxprom.i
- %t2 = load i16, i16* %arrayidx.i7, align 2
+ %arrayidx.i7 = getelementptr inbounds [33 x i16], ptr @tab_log2, i64 0, i64 %idxprom.i
+ %t2 = load i16, ptr %arrayidx.i7, align 2
%conv7.i = zext i16 %t2 to i32
%narrow.i = add nuw nsw i8 %conv3.i, 1
%t3 = zext i8 %narrow.i to i64
- %arrayidx11.i = getelementptr inbounds [33 x i16], [33 x i16]* @tab_log2, i64 0, i64 %t3
- %t4 = load i16, i16* %arrayidx11.i, align 2
+ %arrayidx11.i = getelementptr inbounds [33 x i16], ptr @tab_log2, i64 0, i64 %t3
+ %t4 = load i16, ptr %arrayidx11.i, align 2
%conv12.i = zext i16 %t4 to i32
%sub16.i = sub nsw i32 %conv12.i, %conv7.i
%mul.i8 = mul nsw i32 %conv6.i, %sub16.i
declare i32 @get()
-define void @fully_unrolled_single_iteration(i32* %src) #0 {
+define void @fully_unrolled_single_iteration(ptr %src) #0 {
; CHECK-LABEL: @fully_unrolled_single_iteration(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[SRC:%.*]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 [[V]], i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[SRC:%.*]]
+; CHECK-NEXT: store i32 [[V]], ptr [[ARR]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
- %v = load i32, i32* %src.idx
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %v, i32* %arrayidx, align 4
+ %src.idx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+ %v = load i32, ptr %src.idx
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %v, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
-; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
-; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
-; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: store i32 16, ptr [[ARR]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
-; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
-; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
-; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: store i32 16, ptr [[ARR]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i32 [[OR]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 7
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 7
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
-declare void @use(i32*)
+declare void @use(ptr)
attributes #0 = { optsize }
attributes #1 = { minsize optsize }
; Test that max iterations count to analyze (specific for the target)
; is enough to make the inner loop completely unrolled
; CHECK-LABEL: foo
-define void @foo(float addrspace(5)* %ptrB, float addrspace(5)* %ptrC, i32 %A, i32 %A2, float %M) {
+define void @foo(ptr addrspace(5) %ptrB, ptr addrspace(5) %ptrC, i32 %A, i32 %A2, float %M) {
bb:
br label %bb2
%phi = phi i32 [ 0, %bb4 ], [ %inc, %for.body ]
%mul = shl nuw nsw i32 %phi, 6
%add = add i32 %A, %mul
- %arrayidx = getelementptr inbounds float, float addrspace(5)* %ptrC, i32 %add
- %ld1 = load float, float addrspace(5)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %ptrC, i32 %add
+ %ld1 = load float, ptr addrspace(5) %arrayidx, align 4
%mul2 = shl nuw nsw i32 %phi, 3
%add2 = add i32 %A2, %mul2
- %arrayidx2 = getelementptr inbounds float, float addrspace(5)* %ptrB, i32 %add2
- %ld2 = load float, float addrspace(5)* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr addrspace(5) %ptrB, i32 %add2
+ %ld2 = load float, ptr addrspace(5) %arrayidx2, align 4
%mul3 = fmul contract float %M, %ld2
%add3 = fadd contract float %ld1, %mul3
- store float %add3, float addrspace(5)* %arrayidx, align 4
+ store float %add3, ptr addrspace(5) %arrayidx, align 4
%add1 = add nuw nsw i32 %add, 2048
- %arrayidx3 = getelementptr inbounds float, float addrspace(5)* %ptrC, i32 %add1
- %ld3 = load float, float addrspace(5)* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds float, ptr addrspace(5) %ptrC, i32 %add1
+ %ld3 = load float, ptr addrspace(5) %arrayidx3, align 4
%mul4 = fmul contract float %ld2, %M
%add4 = fadd contract float %ld3, %mul4
- store float %add4, float addrspace(5)* %arrayidx3, align 4
+ store float %add4, ptr addrspace(5) %arrayidx3, align 4
%inc = add nuw nsw i32 %phi, 1
%cmpi = icmp ult i32 %phi, 31
br i1 %cmpi, label %for.body, label %bb10
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK-NOT: br
-define amdgpu_kernel void @test_unroll_convergent_barrier(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_unroll_convergent_barrier(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx.in = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %indvars.iv
- %load = load i32, i32 addrspace(1)* %arrayidx.in
+ %arrayidx.in = getelementptr inbounds i32, ptr addrspace(1) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds i32, ptr addrspace(1) %out, i32 %indvars.iv
+ %load = load i32, ptr addrspace(1) %arrayidx.in
call void @llvm.amdgcn.s.barrier() #1
%add = add i32 %load, %sum.02
- store i32 %add, i32 addrspace(1)* %arrayidx.out
+ store i32 %add, ptr addrspace(1) %arrayidx.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test_func_addrspacecast_cost_noop(
; CHECK-NOT: br i1
-define amdgpu_kernel void @test_func_addrspacecast_cost_noop(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_func_addrspacecast_cost_noop(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
- %cast.in = addrspacecast float addrspace(1)* %arrayidx.in to float*
- %cast.out = addrspacecast float addrspace(1)* %arrayidx.out to float*
- %load = load float, float* %cast.in
+ %arrayidx.in = getelementptr inbounds float, ptr addrspace(1) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr addrspace(1) %out, i32 %indvars.iv
+ %cast.in = addrspacecast ptr addrspace(1) %arrayidx.in to ptr
+ %cast.out = addrspacecast ptr addrspace(1) %arrayidx.out to ptr
+ %load = load float, ptr %cast.in
%fmul = fmul float %load, %sum.02
- store float %fmul, float* %cast.out
+ store float %fmul, ptr %cast.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; Free, but not a no-op
; CHECK-LABEL: @test_func_addrspacecast_cost_free(
; CHECK-NOT: br i1
-define amdgpu_kernel void @test_func_addrspacecast_cost_free(float* noalias nocapture %out, float* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_func_addrspacecast_cost_free(ptr noalias nocapture %out, ptr noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float* %out, i32 %indvars.iv
- %cast.in = addrspacecast float* %arrayidx.in to float addrspace(3)*
- %cast.out = addrspacecast float* %arrayidx.out to float addrspace(3)*
- %load = load float, float addrspace(3)* %cast.in
+ %arrayidx.in = getelementptr inbounds float, ptr %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr %out, i32 %indvars.iv
+ %cast.in = addrspacecast ptr %arrayidx.in to ptr addrspace(3)
+ %cast.out = addrspacecast ptr %arrayidx.out to ptr addrspace(3)
+ %load = load float, ptr addrspace(3) %cast.in
%fmul = fmul float %load, %sum.02
- store float %fmul, float addrspace(3)* %cast.out
+ store float %fmul, ptr addrspace(3) %cast.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test_func_addrspacecast_cost_nonfree(
; CHECK: br i1 %exitcond
-define amdgpu_kernel void @test_func_addrspacecast_cost_nonfree(float addrspace(3)* noalias nocapture %out, float addrspace(3)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_func_addrspacecast_cost_nonfree(ptr addrspace(3) noalias nocapture %out, ptr addrspace(3) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float addrspace(3)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float addrspace(3)* %out, i32 %indvars.iv
- %cast.in = addrspacecast float addrspace(3)* %arrayidx.in to float*
- %cast.out = addrspacecast float addrspace(3)* %arrayidx.out to float*
- %load = load float, float* %cast.in
+ %arrayidx.in = getelementptr inbounds float, ptr addrspace(3) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr addrspace(3) %out, i32 %indvars.iv
+ %cast.in = addrspacecast ptr addrspace(3) %arrayidx.in to ptr
+ %cast.out = addrspacecast ptr addrspace(3) %arrayidx.out to ptr
+ %load = load float, ptr %cast.in
%fmul = fmul float %load, %sum.02
- store float %fmul, float* %cast.out
+ store float %fmul, ptr %cast.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test_intrinsic_call_cost(
; CHECK-NOT: br i1
-define amdgpu_kernel void @test_intrinsic_call_cost(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_intrinsic_call_cost(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
- %load = load float, float addrspace(1)* %arrayidx.in
+ %arrayidx.in = getelementptr inbounds float, ptr addrspace(1) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr addrspace(1) %out, i32 %indvars.iv
+ %load = load float, ptr addrspace(1) %arrayidx.in
%call = call float @llvm.minnum.f32(float %load, float 1.0);
%fmul = fmul float %call, %sum.02
- store float %fmul, float addrspace(1)* %arrayidx.out
+ store float %fmul, ptr addrspace(1) %arrayidx.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test_func_call_cost(
; CHECK: br i1 %exitcond
-define amdgpu_kernel void @test_func_call_cost(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_func_call_cost(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
- %load = load float, float addrspace(1)* %arrayidx.in
- %fptr = load float(float, float)*, float(float, float )* addrspace(4)* null
+ %arrayidx.in = getelementptr inbounds float, ptr addrspace(1) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr addrspace(1) %out, i32 %indvars.iv
+ %load = load float, ptr addrspace(1) %arrayidx.in
+ %fptr = load ptr, ptr addrspace(4) null
%call = tail call float %fptr(float %load, float 1.0)
%fmul = fmul float %call, %sum.02
- store float %fmul, float addrspace(1)* %arrayidx.out
+ store float %fmul, ptr addrspace(1) %arrayidx.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test_indirect_call_cost(
; CHECK: br i1 %exitcond
-define amdgpu_kernel void @test_indirect_call_cost(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_indirect_call_cost(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture %in) #0 {
entry:
br label %for.body
for.body:
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
- %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
- %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
- %load = load float, float addrspace(1)* %arrayidx.in
+ %arrayidx.in = getelementptr inbounds float, ptr addrspace(1) %in, i32 %indvars.iv
+ %arrayidx.out = getelementptr inbounds float, ptr addrspace(1) %out, i32 %indvars.iv
+ %load = load float, ptr addrspace(1) %arrayidx.in
%min = call float @func(float %load, float 1.0);
%fmul = fmul float %min, %sum.02
- store float %fmul, float addrspace(1)* %arrayidx.out
+ store float %fmul, ptr addrspace(1) %arrayidx.out
%indvars.iv.next = add i32 %indvars.iv, 1
%exitcond = icmp eq i32 %indvars.iv.next, 16
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @non_invariant_ind
; CHECK: for.body:
; CHECK-NOT: br
-; CHECK: store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+; CHECK: store i32 %tmp15, ptr addrspace(1) %arrayidx7, align 4
; CHECK: ret void
-define amdgpu_kernel void @non_invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+define amdgpu_kernel void @non_invariant_ind(ptr addrspace(1) nocapture %a, i32 %x) {
entry:
%arr = alloca [64 x i32], align 4, addrspace(5)
%tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
br label %for.body
for.cond.cleanup: ; preds = %for.body
- %arrayidx5 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %x
- %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
- store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ %arrayidx5 = getelementptr inbounds [64 x i32], ptr addrspace(5) %arr, i32 0, i32 %x
+ %tmp15 = load i32, ptr addrspace(5) %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr addrspace(1) %a, i32 %tmp1
+ store i32 %tmp15, ptr addrspace(1) %arrayidx7, align 4
ret void
for.body: ; preds = %for.body, %entry
%i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.015 to i64
- %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
- %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idxprom
+ %tmp16 = load i32, ptr addrspace(1) %arrayidx, align 4
%add = add nsw i32 %i.015, %tmp1
%rem = srem i32 %add, 64
- %arrayidx3 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %rem
- store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds [64 x i32], ptr addrspace(5) %arr, i32 0, i32 %rem
+ store i32 %tmp16, ptr addrspace(5) %arrayidx3, align 4
%inc = add nuw nsw i32 %i.015, 1
%exitcond = icmp eq i32 %inc, 100
br i1 %exitcond, label %for.cond.cleanup, label %for.body
; CHECK: br i1 %[[exitcond]]
; CHECK-NOT: icmp eq i32 %{{.*}}, 100
-define amdgpu_kernel void @invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+define amdgpu_kernel void @invariant_ind(ptr addrspace(1) nocapture %a, i32 %x) {
entry:
%arr = alloca [64 x i32], align 4, addrspace(5)
%tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
for.cond2.preheader: ; preds = %for.cond.cleanup5, %entry
%i.026 = phi i32 [ 0, %entry ], [ %inc10, %for.cond.cleanup5 ]
%idxprom = sext i32 %i.026 to i64
- %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
- %tmp15 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idxprom
+ %tmp15 = load i32, ptr addrspace(1) %arrayidx, align 4
br label %for.body6
for.cond.cleanup: ; preds = %for.cond.cleanup5
- %arrayidx13 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %x
- %tmp16 = load i32, i32 addrspace(5)* %arrayidx13, align 4
- %arrayidx15 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
- store i32 %tmp16, i32 addrspace(1)* %arrayidx15, align 4
+ %arrayidx13 = getelementptr inbounds [64 x i32], ptr addrspace(5) %arr, i32 0, i32 %x
+ %tmp16 = load i32, ptr addrspace(5) %arrayidx13, align 4
+ %arrayidx15 = getelementptr inbounds i32, ptr addrspace(1) %a, i32 %tmp1
+ store i32 %tmp16, ptr addrspace(1) %arrayidx15, align 4
ret void
for.cond.cleanup5: ; preds = %for.body6
%j.025 = phi i32 [ 0, %for.cond2.preheader ], [ %inc, %for.body6 ]
%add = add nsw i32 %j.025, %tmp1
%rem = srem i32 %add, 64
- %arrayidx8 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %rem
- store i32 %tmp15, i32 addrspace(5)* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [64 x i32], ptr addrspace(5) %arr, i32 0, i32 %rem
+ store i32 %tmp15, ptr addrspace(5) %arrayidx8, align 4
%inc = add nuw nsw i32 %j.025, 1
%exitcond = icmp eq i32 %inc, 100
br i1 %exitcond, label %for.cond.cleanup5, label %for.body6
; CHECK: icmp eq i32 %{{.*}}, 100
; CHECK: br
-define amdgpu_kernel void @too_big(i32 addrspace(1)* nocapture %a, i32 %x) {
+define amdgpu_kernel void @too_big(ptr addrspace(1) nocapture %a, i32 %x) {
entry:
%arr = alloca [256 x i32], align 4, addrspace(5)
%tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
br label %for.body
for.cond.cleanup: ; preds = %for.body
- %arrayidx5 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(5)* %arr, i32 0, i32 %x
- %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
- store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ %arrayidx5 = getelementptr inbounds [256 x i32], ptr addrspace(5) %arr, i32 0, i32 %x
+ %tmp15 = load i32, ptr addrspace(5) %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr addrspace(1) %a, i32 %tmp1
+ store i32 %tmp15, ptr addrspace(1) %arrayidx7, align 4
ret void
for.body: ; preds = %for.body, %entry
%i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.015 to i64
- %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
- %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idxprom
+ %tmp16 = load i32, ptr addrspace(1) %arrayidx, align 4
%add = add nsw i32 %i.015, %tmp1
%rem = srem i32 %add, 64
- %arrayidx3 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(5)* %arr, i32 0, i32 %rem
- store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds [256 x i32], ptr addrspace(5) %arr, i32 0, i32 %rem
+ store i32 %tmp16, ptr addrspace(5) %arrayidx3, align 4
%inc = add nuw nsw i32 %i.015, 1
%exitcond = icmp eq i32 %inc, 100
br i1 %exitcond, label %for.cond.cleanup, label %for.body
; CHECK: icmp eq i32 %{{.*}}, 100
; CHECK: br
-define amdgpu_kernel void @dynamic_size_alloca(i32 addrspace(1)* nocapture %a, i32 %n, i32 %x) {
+define amdgpu_kernel void @dynamic_size_alloca(ptr addrspace(1) nocapture %a, i32 %n, i32 %x) {
entry:
%arr = alloca i32, i32 %n, align 4, addrspace(5)
%tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
br label %for.body
for.cond.cleanup: ; preds = %for.body
- %arrayidx5 = getelementptr inbounds i32, i32 addrspace(5)* %arr, i32 %x
- %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
- store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr addrspace(5) %arr, i32 %x
+ %tmp15 = load i32, ptr addrspace(5) %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr addrspace(1) %a, i32 %tmp1
+ store i32 %tmp15, ptr addrspace(1) %arrayidx7, align 4
ret void
for.body: ; preds = %for.body, %entry
%i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.015 to i64
- %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
- %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idxprom
+ %tmp16 = load i32, ptr addrspace(1) %arrayidx, align 4
%add = add nsw i32 %i.015, %tmp1
%rem = srem i32 %add, 64
- %arrayidx3 = getelementptr inbounds i32, i32 addrspace(5)* %arr, i32 %rem
- store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr addrspace(5) %arr, i32 %rem
+ store i32 %tmp16, ptr addrspace(5) %arrayidx3, align 4
%inc = add nuw nsw i32 %i.015, 1
%exitcond = icmp eq i32 %inc, 100
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
-declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #1
+declare ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #1
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.x() #1
-declare i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
attributes #1 = { nounwind readnone }
; CHECK: br i1 %cmp
; CHECK: ret void
-@in = internal unnamed_addr global i32* null, align 8
-@out = internal unnamed_addr global i32* null, align 8
+@in = internal unnamed_addr global ptr null, align 8
+@out = internal unnamed_addr global ptr null, align 8
define void @unroll_default() {
entry:
do.body: ; preds = %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
- %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
- store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
+ %v1 = load i64, ptr @in, align 8
+ store i64 %v1, ptr @out, align 8
%inc = add nsw i32 %i.0, 1
%cmp = icmp slt i32 %inc, 100
br i1 %cmp, label %do.body, label %do.end
do.body: ; preds = %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
- %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
- store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
+ %v1 = load i64, ptr @in, align 8
+ store i64 %v1, ptr @out, align 8
%inc = add nsw i32 %i.0, 1
%cmp = icmp slt i32 %inc, 100
br i1 %cmp, label %do.body, label %do.end, !llvm.loop !1
do.body: ; preds = %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
- %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
- store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
+ %v1 = load i64, ptr @in, align 8
+ store i64 %v1, ptr @out, align 8
%inc = add nsw i32 %i.0, 1
%cmp = icmp slt i32 %inc, 100
br i1 %cmp, label %do.body, label %do.end, !llvm.loop !3
do.body: ; preds = %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
- %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
- store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
+ %v1 = load i64, ptr @in, align 8
+ store i64 %v1, ptr @out, align 8
%inc = add nsw i32 %i.0, 1
%cmp = icmp slt i32 %inc, 100
br i1 %cmp, label %do.body, label %do.end, !llvm.loop !1
; CHECK: ret void
; CHECK: }
-define void @foo(i8* nocapture, i8* nocapture readonly, i32) {
+define void @foo(ptr nocapture, ptr nocapture readonly, i32) {
%4 = icmp sgt i32 %2, 0
br i1 %4, label %5, label %16
; <label>:6:
%7 = phi i32 [ %13, %6 ], [ %2, %5 ]
- %8 = phi i8* [ %10, %6 ], [ %1, %5 ]
- %9 = phi i8* [ %12, %6 ], [ %0, %5 ]
- %10 = getelementptr inbounds i8, i8* %8, i32 1
- %11 = load i8, i8* %8, align 1
- %12 = getelementptr inbounds i8, i8* %9, i32 1
- store i8 %11, i8* %9, align 1
+ %8 = phi ptr [ %10, %6 ], [ %1, %5 ]
+ %9 = phi ptr [ %12, %6 ], [ %0, %5 ]
+ %10 = getelementptr inbounds i8, ptr %8, i32 1
+ %11 = load i8, ptr %8, align 1
+ %12 = getelementptr inbounds i8, ptr %9, i32 1
+ store i8 %11, ptr %9, align 1
%13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %7, i32 1)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-unroll -unroll-allow-partial -unroll-optsize-threshold=18 -mtriple=thumbv8 -S %s -o - | FileCheck %s --check-prefix=CHECK-V8
-define void @test_i32_add_optsize(i32* %a, i32* %b, i32* %c) #0 {
+define void @test_i32_add_optsize(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-V8-LABEL: @test_i32_add_optsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[RES:%.*]] = add i32 [[DATA_A]], [[DATA_B]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i32 [[RES]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i32 [[RES]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[RES_1:%.*]] = add i32 [[DATA_A_1]], [[DATA_B_1]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i32 [[RES_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i32 [[RES_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
- %addr.a = getelementptr i32, i32* %a, i32 %iv
- %addr.b = getelementptr i32, i32* %b, i32 %iv
- %data.a = load i32, i32* %addr.a
- %data.b = load i32, i32* %addr.b
+ %addr.a = getelementptr i32, ptr %a, i32 %iv
+ %addr.b = getelementptr i32, ptr %b, i32 %iv
+ %data.a = load i32, ptr %addr.a
+ %data.b = load i32, ptr %addr.b
%res = add i32 %data.a, %data.b
- %addr.c = getelementptr i32, i32* %c, i32 %iv
- store i32 %res, i32* %addr.c
+ %addr.c = getelementptr i32, ptr %c, i32 %iv
+ store i32 %res, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret void
}
-define void @test_i32_add_minsize(i32* %a, i32* %b, i32* %c) #1 {
+define void @test_i32_add_minsize(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-V8-LABEL: @test_i32_add_minsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[RES:%.*]] = add i32 [[DATA_A]], [[DATA_B]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i32 [[RES]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i32 [[RES]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[RES_1:%.*]] = add i32 [[DATA_A_1]], [[DATA_B_1]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i32 [[RES_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i32 [[RES_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
- %addr.a = getelementptr i32, i32* %a, i32 %iv
- %addr.b = getelementptr i32, i32* %b, i32 %iv
- %data.a = load i32, i32* %addr.a
- %data.b = load i32, i32* %addr.b
+ %addr.a = getelementptr i32, ptr %a, i32 %iv
+ %addr.b = getelementptr i32, ptr %b, i32 %iv
+ %data.a = load i32, ptr %addr.a
+ %data.b = load i32, ptr %addr.b
%res = add i32 %data.a, %data.b
- %addr.c = getelementptr i32, i32* %c, i32 %iv
- store i32 %res, i32* %addr.c
+ %addr.c = getelementptr i32, ptr %c, i32 %iv
+ store i32 %res, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret void
}
-define void @test_i64_add_optsize(i64* %a, i64* %b, i64* %c) #0 {
+define void @test_i64_add_optsize(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-V8-LABEL: @test_i64_add_optsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[RES:%.*]] = add i64 [[DATA_A]], [[DATA_B]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i64 [[RES]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i64 [[RES]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i64, i64* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i64, i64* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i64, i64* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i64, i64* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i64, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i64, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i64, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i64, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[RES_1:%.*]] = add i64 [[DATA_A_1]], [[DATA_B_1]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i64, i64* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i64 [[RES_1]], i64* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i64, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i64 [[RES_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
- %addr.a = getelementptr i64, i64* %a, i32 %iv
- %addr.b = getelementptr i64, i64* %b, i32 %iv
- %data.a = load i64, i64* %addr.a
- %data.b = load i64, i64* %addr.b
+ %addr.a = getelementptr i64, ptr %a, i32 %iv
+ %addr.b = getelementptr i64, ptr %b, i32 %iv
+ %data.a = load i64, ptr %addr.a
+ %data.b = load i64, ptr %addr.b
%res = add i64 %data.a, %data.b
- %addr.c = getelementptr i64, i64* %c, i32 %iv
- store i64 %res, i64* %addr.c
+ %addr.c = getelementptr i64, ptr %c, i32 %iv
+ store i64 %res, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret void
}
-define void @test_i64_add_minsize(i64* %a, i64* %b, i64* %c) #1 {
+define void @test_i64_add_minsize(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-V8-LABEL: @test_i64_add_minsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[RES:%.*]] = add i64 [[DATA_A]], [[DATA_B]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i64 [[RES]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i64 [[RES]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i64, i64* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i64, i64* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i64, i64* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i64, i64* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i64, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i64, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i64, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i64, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[RES_1:%.*]] = add i64 [[DATA_A_1]], [[DATA_B_1]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i64, i64* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i64 [[RES_1]], i64* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i64, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i64 [[RES_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
- %addr.a = getelementptr i64, i64* %a, i32 %iv
- %addr.b = getelementptr i64, i64* %b, i32 %iv
- %data.a = load i64, i64* %addr.a
- %data.b = load i64, i64* %addr.b
+ %addr.a = getelementptr i64, ptr %a, i32 %iv
+ %addr.b = getelementptr i64, ptr %b, i32 %iv
+ %data.a = load i64, ptr %addr.a
+ %data.b = load i64, ptr %addr.b
%res = add i64 %data.a, %data.b
- %addr.c = getelementptr i64, i64* %c, i32 %iv
- store i64 %res, i64* %addr.c
+ %addr.c = getelementptr i64, ptr %c, i32 %iv
+ store i64 %res, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret void
}
-define i32 @test_i32_select_optsize(i32* %a, i32* %b, i32* %c) #0 {
+define i32 @test_i32_select_optsize(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-V8-LABEL: @test_i32_select_optsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
; CHECK-V8-NEXT: [[ACC:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ACC_NEXT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[UGT:%.*]] = icmp ugt i32 [[DATA_A]], [[DATA_B]]
; CHECK-V8-NEXT: [[UMAX:%.*]] = select i1 [[UGT]], i32 [[DATA_A]], i32 [[DATA_B]]
; CHECK-V8-NEXT: [[ACC_NEXT:%.*]] = add i32 [[UMAX]], [[ACC]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i32 [[UMAX]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i32 [[UMAX]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[UGT_1:%.*]] = icmp ugt i32 [[DATA_A_1]], [[DATA_B_1]]
; CHECK-V8-NEXT: [[UMAX_1:%.*]] = select i1 [[UGT_1]], i32 [[DATA_A_1]], i32 [[DATA_B_1]]
; CHECK-V8-NEXT: [[ACC_NEXT_1]] = add i32 [[UMAX_1]], [[ACC_NEXT]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i32 [[UMAX_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i32 [[UMAX_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
%acc = phi i32 [ 0, %entry], [ %acc.next, %loop ]
- %addr.a = getelementptr i32, i32* %a, i32 %iv
- %addr.b = getelementptr i32, i32* %b, i32 %iv
- %data.a = load i32, i32* %addr.a
- %data.b = load i32, i32* %addr.b
+ %addr.a = getelementptr i32, ptr %a, i32 %iv
+ %addr.b = getelementptr i32, ptr %b, i32 %iv
+ %data.a = load i32, ptr %addr.a
+ %data.b = load i32, ptr %addr.b
%ugt = icmp ugt i32 %data.a, %data.b
%umax = select i1 %ugt, i32 %data.a, i32 %data.b
%acc.next = add i32 %umax, %acc
- %addr.c = getelementptr i32, i32* %c, i32 %iv
- store i32 %umax, i32* %addr.c
+ %addr.c = getelementptr i32, ptr %c, i32 %iv
+ store i32 %umax, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret i32 %acc.next
}
-define i32 @test_i32_select_minsize(i32* %a, i32* %b, i32* %c) #1 {
+define i32 @test_i32_select_minsize(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-V8-LABEL: @test_i32_select_minsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
; CHECK-V8-NEXT: [[ACC:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ACC_NEXT_1:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i32, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i32, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i32, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[UGT:%.*]] = icmp ugt i32 [[DATA_A]], [[DATA_B]]
; CHECK-V8-NEXT: [[UMAX:%.*]] = select i1 [[UGT]], i32 [[DATA_A]], i32 [[DATA_B]]
; CHECK-V8-NEXT: [[ACC_NEXT:%.*]] = add i32 [[UMAX]], [[ACC]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i32 [[UMAX]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i32, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i32 [[UMAX]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
-; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
-; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
-; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_A_1:%.*]] = getelementptr i32, ptr [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[ADDR_B_1:%.*]] = getelementptr i32, ptr [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT: [[DATA_A_1:%.*]] = load i32, ptr [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT: [[DATA_B_1:%.*]] = load i32, ptr [[ADDR_B_1]], align 4
; CHECK-V8-NEXT: [[UGT_1:%.*]] = icmp ugt i32 [[DATA_A_1]], [[DATA_B_1]]
; CHECK-V8-NEXT: [[UMAX_1:%.*]] = select i1 [[UGT_1]], i32 [[DATA_A_1]], i32 [[DATA_B_1]]
; CHECK-V8-NEXT: [[ACC_NEXT_1]] = add i32 [[UMAX_1]], [[ACC_NEXT]]
-; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
-; CHECK-V8-NEXT: store i32 [[UMAX_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT: [[ADDR_C_1:%.*]] = getelementptr i32, ptr [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT: store i32 [[UMAX_1]], ptr [[ADDR_C_1]], align 4
; CHECK-V8-NEXT: [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
; CHECK-V8-NEXT: [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
; CHECK-V8-NEXT: br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
%acc = phi i32 [ 0, %entry], [ %acc.next, %loop ]
- %addr.a = getelementptr i32, i32* %a, i32 %iv
- %addr.b = getelementptr i32, i32* %b, i32 %iv
- %data.a = load i32, i32* %addr.a
- %data.b = load i32, i32* %addr.b
+ %addr.a = getelementptr i32, ptr %a, i32 %iv
+ %addr.b = getelementptr i32, ptr %b, i32 %iv
+ %data.a = load i32, ptr %addr.a
+ %data.b = load i32, ptr %addr.b
%ugt = icmp ugt i32 %data.a, %data.b
%umax = select i1 %ugt, i32 %data.a, i32 %data.b
%acc.next = add i32 %umax, %acc
- %addr.c = getelementptr i32, i32* %c, i32 %iv
- store i32 %umax, i32* %addr.c
+ %addr.c = getelementptr i32, ptr %c, i32 %iv
+ store i32 %umax, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret i32 %acc.next
}
-define i64 @test_i64_select_optsize(i64* %a, i64* %b, i64* %c) #0 {
+define i64 @test_i64_select_optsize(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-V8-LABEL: @test_i64_select_optsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT:%.*]], [[LOOP]] ]
; CHECK-V8-NEXT: [[ACC:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ACC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[UGT:%.*]] = icmp ugt i64 [[DATA_A]], [[DATA_B]]
; CHECK-V8-NEXT: [[UMAX:%.*]] = select i1 [[UGT]], i64 [[DATA_A]], i64 [[DATA_B]]
; CHECK-V8-NEXT: [[ACC_NEXT]] = add i64 [[UMAX]], [[ACC]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i64 [[UMAX]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i64 [[UMAX]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT]] = add nuw i32 [[IV]], 1
; CHECK-V8-NEXT: [[END:%.*]] = icmp ne i32 [[COUNT]], 100
; CHECK-V8-NEXT: br i1 [[END]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
%acc = phi i64 [ 0, %entry], [ %acc.next, %loop ]
- %addr.a = getelementptr i64, i64* %a, i32 %iv
- %addr.b = getelementptr i64, i64* %b, i32 %iv
- %data.a = load i64, i64* %addr.a
- %data.b = load i64, i64* %addr.b
+ %addr.a = getelementptr i64, ptr %a, i32 %iv
+ %addr.b = getelementptr i64, ptr %b, i32 %iv
+ %data.a = load i64, ptr %addr.a
+ %data.b = load i64, ptr %addr.b
%ugt = icmp ugt i64 %data.a, %data.b
%umax = select i1 %ugt, i64 %data.a, i64 %data.b
%acc.next = add i64 %umax, %acc
- %addr.c = getelementptr i64, i64* %c, i32 %iv
- store i64 %umax, i64* %addr.c
+ %addr.c = getelementptr i64, ptr %c, i32 %iv
+ store i64 %umax, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
ret i64 %acc.next
}
-define i64 @test_i64_select_minsize(i64* %a, i64* %b, i64* %c) #1 {
+define i64 @test_i64_select_minsize(ptr %a, ptr %b, ptr %c) #1 {
; CHECK-V8-LABEL: @test_i64_select_minsize(
; CHECK-V8-NEXT: entry:
; CHECK-V8-NEXT: br label [[LOOP:%.*]]
; CHECK-V8: loop:
; CHECK-V8-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT:%.*]], [[LOOP]] ]
; CHECK-V8-NEXT: [[ACC:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ACC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
-; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT: [[ADDR_A:%.*]] = getelementptr i64, ptr [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[ADDR_B:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: [[DATA_A:%.*]] = load i64, ptr [[ADDR_A]], align 4
+; CHECK-V8-NEXT: [[DATA_B:%.*]] = load i64, ptr [[ADDR_B]], align 4
; CHECK-V8-NEXT: [[UGT:%.*]] = icmp ugt i64 [[DATA_A]], [[DATA_B]]
; CHECK-V8-NEXT: [[UMAX:%.*]] = select i1 [[UGT]], i64 [[DATA_A]], i64 [[DATA_B]]
; CHECK-V8-NEXT: [[ACC_NEXT]] = add i64 [[UMAX]], [[ACC]]
-; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
-; CHECK-V8-NEXT: store i64 [[UMAX]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT: [[ADDR_C:%.*]] = getelementptr i64, ptr [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT: store i64 [[UMAX]], ptr [[ADDR_C]], align 4
; CHECK-V8-NEXT: [[COUNT]] = add nuw i32 [[IV]], 1
; CHECK-V8-NEXT: [[END:%.*]] = icmp ne i32 [[COUNT]], 100
; CHECK-V8-NEXT: br i1 [[END]], label [[LOOP]], label [[EXIT:%.*]]
loop:
%iv = phi i32 [ 0, %entry ], [ %count, %loop ]
%acc = phi i64 [ 0, %entry], [ %acc.next, %loop ]
- %addr.a = getelementptr i64, i64* %a, i32 %iv
- %addr.b = getelementptr i64, i64* %b, i32 %iv
- %data.a = load i64, i64* %addr.a
- %data.b = load i64, i64* %addr.b
+ %addr.a = getelementptr i64, ptr %a, i32 %iv
+ %addr.b = getelementptr i64, ptr %b, i32 %iv
+ %data.a = load i64, ptr %addr.a
+ %data.b = load i64, ptr %addr.b
%ugt = icmp ugt i64 %data.a, %data.b
%umax = select i1 %ugt, i64 %data.a, i64 %data.b
%acc.next = add i64 %umax, %acc
- %addr.c = getelementptr i64, i64* %c, i32 %iv
- store i64 %umax, i64* %addr.c
+ %addr.c = getelementptr i64, ptr %c, i32 %iv
+ store i64 %umax, ptr %addr.c
%count = add nuw i32 %iv, 1
%end = icmp ne i32 %count, 100
br i1 %end, label %loop, label %exit
; RUN: opt -mtriple=thumbv7em -mcpu=cortex-m7 -passes=loop-unroll -S %s -o - | FileCheck %s --check-prefix=CHECK-UNROLL
; CHECK-LABEL: partial
-define arm_aapcs_vfpcc void @partial(i32* nocapture %C, i32* nocapture readonly %A, i32* nocapture readonly %B) local_unnamed_addr #0 {
+define arm_aapcs_vfpcc void @partial(ptr nocapture %C, ptr nocapture readonly %A, ptr nocapture readonly %B) local_unnamed_addr #0 {
entry:
br label %for.body
; CHECK-UNROLL: br i1 [[CMP]], label [[END:%[a-z.]+]], label %for.body
%i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.08
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %B, i32 %i.08
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.08
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %B, i32 %i.08
+ %1 = load i32, ptr %arrayidx1, align 4
%mul = mul nsw i32 %1, %0
- %arrayidx2 = getelementptr inbounds i32, i32* %C, i32 %i.08
- store i32 %mul, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %C, i32 %i.08
+ store i32 %mul, ptr %arrayidx2, align 4
%inc = add nuw nsw i32 %i.08, 1
%exitcond = icmp eq i32 %inc, 1024
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: runtime
-define arm_aapcs_vfpcc void @runtime(i32* nocapture %C, i32* nocapture readonly %A, i32* nocapture readonly %B, i32 %N) local_unnamed_addr #0 {
+define arm_aapcs_vfpcc void @runtime(ptr nocapture %C, ptr nocapture readonly %A, ptr nocapture readonly %B, i32 %N) local_unnamed_addr #0 {
entry:
%cmp8 = icmp eq i32 %N, 0
br i1 %cmp8, label %for.cond.cleanup, label %for.body
; CHECK-UNROLL: for.body.epil.2:
%i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.09
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %B, i32 %i.09
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.09
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %B, i32 %i.09
+ %1 = load i32, ptr %arrayidx1, align 4
%mul = mul nsw i32 %1, %0
- %arrayidx2 = getelementptr inbounds i32, i32* %C, i32 %i.09
- store i32 %mul, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %C, i32 %i.09
+ store i32 %mul, ptr %arrayidx2, align 4
%inc = add nuw i32 %i.09, 1
%exitcond = icmp eq i32 %inc, %N
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: nested_runtime
-define arm_aapcs_vfpcc void @nested_runtime(i32* nocapture %C, i16* nocapture readonly %A, i16* nocapture readonly %B, i32 %N) local_unnamed_addr #0 {
+define arm_aapcs_vfpcc void @nested_runtime(ptr nocapture %C, ptr nocapture readonly %A, ptr nocapture readonly %B, i32 %N) local_unnamed_addr #0 {
entry:
%cmp25 = icmp eq i32 %N, 0
br i1 %cmp25, label %for.cond.cleanup, label %for.body4.lr.ph
%w.024 = phi i32 [ 0, %for.body4.lr.ph ], [ %inc, %for.body4 ]
%add = add i32 %w.024, %mul
- %arrayidx = getelementptr inbounds i16, i16* %A, i32 %add
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add
+ %0 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %0 to i32
- %arrayidx5 = getelementptr inbounds i16, i16* %B, i32 %w.024
- %1 = load i16, i16* %arrayidx5, align 2
+ %arrayidx5 = getelementptr inbounds i16, ptr %B, i32 %w.024
+ %1 = load i16, ptr %arrayidx5, align 2
%conv6 = sext i16 %1 to i32
%mul7 = mul nsw i32 %conv6, %conv
- %arrayidx8 = getelementptr inbounds i32, i32* %C, i32 %w.024
- %2 = load i32, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %C, i32 %w.024
+ %2 = load i32, ptr %arrayidx8, align 4
%add9 = add nsw i32 %mul7, %2
- store i32 %add9, i32* %arrayidx8, align 4
+ store i32 %add9, ptr %arrayidx8, align 4
%inc = add nuw i32 %w.024, 1
%exitcond = icmp eq i32 %inc, %N
br i1 %exitcond, label %for.cond.cleanup3, label %for.body4
}
; CHECK-LABEL: loop_call
-define arm_aapcs_vfpcc void @loop_call(i32* nocapture %C, i32* nocapture readonly %A, i32* nocapture readonly %B) local_unnamed_addr #1 {
+define arm_aapcs_vfpcc void @loop_call(ptr nocapture %C, ptr nocapture readonly %A, ptr nocapture readonly %B) local_unnamed_addr #1 {
entry:
br label %for.body
; CHECK-UNROLL: br
%i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.08
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %B, i32 %i.08
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.08
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %B, i32 %i.08
+ %1 = load i32, ptr %arrayidx1, align 4
%call = tail call arm_aapcs_vfpcc i32 @some_func(i32 %0, i32 %1) #3
- %arrayidx2 = getelementptr inbounds i32, i32* %C, i32 %i.08
- store i32 %call, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %C, i32 %i.08
+ store i32 %call, ptr %arrayidx2, align 4
%inc = add nuw nsw i32 %i.08, 1
%exitcond = icmp eq i32 %inc, 1024
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: iterate_inc
-; CHECK-NOUNROLL: %n.addr.04 = phi %struct.Node* [ %1, %while.body ], [ %n, %while.body.preheader ]
-; CHECK-NOUNROLL: %tobool = icmp eq %struct.Node* %1, null
+; CHECK-NOUNROLL: %n.addr.04 = phi ptr [ %1, %while.body ], [ %n, %while.body.preheader ]
+; CHECK-NOUNROLL: %tobool = icmp eq ptr %1, null
; CHECK-NOUNROLL: br i1 %tobool
; CHECK-NOUNROLL-NOT: load
-; CHECK-UNROLL: [[CMP0:%[a-z.0-9]+]] = icmp eq %struct.Node* [[VAR0:%[a-z.0-9]+]], null
+; CHECK-UNROLL: [[CMP0:%[a-z.0-9]+]] = icmp eq ptr [[VAR0:%[a-z.0-9]+]], null
; CHECK-UNROLL: br i1 [[CMP0]], label [[END:%[a-z.0-9]+]]
-; CHECK-UNROLL: [[CMP1:%[a-z.0-9]+]] = icmp eq %struct.Node* [[VAR1:%[a-z.0-9]+]], null
+; CHECK-UNROLL: [[CMP1:%[a-z.0-9]+]] = icmp eq ptr [[VAR1:%[a-z.0-9]+]], null
; CHECK-UNROLL: br i1 [[CMP1]], label [[END]]
-; CHECK-UNROLL: [[CMP2:%[a-z.0-9]+]] = icmp eq %struct.Node* [[VAR2:%[a-z.0-9]+]], null
+; CHECK-UNROLL: [[CMP2:%[a-z.0-9]+]] = icmp eq ptr [[VAR2:%[a-z.0-9]+]], null
; CHECK-UNROLL: br i1 [[CMP2]], label [[END]]
-; CHECK-UNROLL: [[CMP3:%[a-z.0-9]+]] = icmp eq %struct.Node* [[VAR3:%[a-z.0-9]+]], null
+; CHECK-UNROLL: [[CMP3:%[a-z.0-9]+]] = icmp eq ptr [[VAR3:%[a-z.0-9]+]], null
; CHECK-UNROLL: br i1 [[CMP3]], label [[END]]
-; CHECK-UNROLL: [[CMP4:%[a-z.0-9]+]] = icmp eq %struct.Node* [[VAR4:%[a-z.0-9]+]], null
+; CHECK-UNROLL: [[CMP4:%[a-z.0-9]+]] = icmp eq ptr [[VAR4:%[a-z.0-9]+]], null
; CHECK-UNROLL: br i1 [[CMP4]], label [[END]]
; CHECK-UNROLL-NOT: load
-%struct.Node = type { %struct.Node*, i32 }
+%struct.Node = type { ptr, i32 }
-define arm_aapcscc void @iterate_inc(%struct.Node* %n) local_unnamed_addr #0 {
+define arm_aapcscc void @iterate_inc(ptr %n) local_unnamed_addr #0 {
entry:
- %tobool3 = icmp eq %struct.Node* %n, null
+ %tobool3 = icmp eq ptr %n, null
br i1 %tobool3, label %while.end, label %while.body.preheader
while.body.preheader:
br label %while.body
while.body:
- %n.addr.04 = phi %struct.Node* [ %1, %while.body ], [ %n, %while.body.preheader ]
- %val = getelementptr inbounds %struct.Node, %struct.Node* %n.addr.04, i32 0, i32 1
- %0 = load i32, i32* %val, align 4
+ %n.addr.04 = phi ptr [ %1, %while.body ], [ %n, %while.body.preheader ]
+ %val = getelementptr inbounds %struct.Node, ptr %n.addr.04, i32 0, i32 1
+ %0 = load i32, ptr %val, align 4
%add = add nsw i32 %0, 1
- store i32 %add, i32* %val, align 4
- %next = getelementptr inbounds %struct.Node, %struct.Node* %n.addr.04, i32 0, i32 0
- %1 = load %struct.Node*, %struct.Node** %next, align 4
- %tobool = icmp eq %struct.Node* %1, null
+ store i32 %add, ptr %val, align 4
+ %1 = load ptr, ptr %n.addr.04, align 4
+ %tobool = icmp eq ptr %1, null
br i1 %tobool, label %while.end, label %while.body
while.end:
declare i32 @get()
-define void @fully_unrolled_single_iteration(i32* %src) #0 {
+define void @fully_unrolled_single_iteration(ptr %src) #0 {
; CHECK-LABEL: @fully_unrolled_single_iteration(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[SRC:%.*]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 [[V]], i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[SRC:%.*]]
+; CHECK-NEXT: store i32 [[V]], ptr [[ARR]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
- %v = load i32, i32* %src.idx
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %v, i32* %arrayidx, align 4
+ %src.idx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
+ %v = load i32, ptr %src.idx
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %v, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
-; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
-; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
-; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: store i32 16, ptr [[ARR]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
-; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
-; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
-; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
-; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: store i32 16, ptr [[ARR]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
; CHECK-NEXT: [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[ARR]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i32 [[OR]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 7
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
-; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: call void @use(ptr nonnull [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
- %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
- store i32 %or, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 %indvars.iv
+ store i32 %or, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 7
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
- %ptr = bitcast [4 x i32]* %arr to i32*
- call void @use(i32* nonnull %ptr) #4
+ call void @use(ptr nonnull %arr) #4
ret void
}
-declare void @use(i32*)
+declare void @use(ptr)
attributes #0 = { optsize }
attributes #1 = { minsize optsize }
; RUN: opt -passes=loop-unroll,simplifycfg,instcombine,simplifycfg -switch-range-to-icmp -S -mtriple arm-none-eabi -mcpu=cortex-m7 %s | FileCheck %s
; This test is meant to check that this loop is unrolled into three iterations.
-define void @test(i32* %x, i32 %n) {
+define void @test(ptr %x, i32 %n) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[N:%.*]], -1
; CHECK-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[REM]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[WHILE_BODY:%.*]], label [[WHILE_END:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP0]], 10
; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: store i32 0, i32* [[X]], align 4
+; CHECK-NEXT: store i32 0, ptr [[X]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[REM]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[WHILE_BODY_1:%.*]], label [[WHILE_END]]
; CHECK: while.body.1:
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT: [[CMP1_1:%.*]] = icmp slt i32 [[TMP1]], 10
; CHECK-NEXT: br i1 [[CMP1_1]], label [[IF_THEN_1:%.*]], label [[IF_END_1:%.*]]
; CHECK: if.then.1:
-; CHECK-NEXT: store i32 0, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT: br label [[IF_END_1]]
; CHECK: if.end.1:
-; CHECK-NEXT: [[INCDEC_PTR_1:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR_1:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 2
; CHECK-NEXT: [[CMP_1:%.*]] = icmp sgt i32 [[REM]], 2
; CHECK-NEXT: br i1 [[CMP_1]], label [[WHILE_BODY_2:%.*]], label [[WHILE_END]]
; CHECK: while.body.2:
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR_1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[INCDEC_PTR_1]], align 4
; CHECK-NEXT: [[CMP1_2:%.*]] = icmp slt i32 [[TMP2]], 10
; CHECK-NEXT: br i1 [[CMP1_2]], label [[IF_THEN_2:%.*]], label [[WHILE_END]]
; CHECK: if.then.2:
-; CHECK-NEXT: store i32 0, i32* [[INCDEC_PTR_1]], align 4
+; CHECK-NEXT: store i32 0, ptr [[INCDEC_PTR_1]], align 4
; CHECK-NEXT: br label [[WHILE_END]]
; CHECK: while.end:
; CHECK-NEXT: ret void
br i1 %cmp7, label %while.body, label %while.end
while.body: ; preds = %entry, %if.end
- %x.addr.09 = phi i32* [ %incdec.ptr, %if.end ], [ %x, %entry ]
+ %x.addr.09 = phi ptr [ %incdec.ptr, %if.end ], [ %x, %entry ]
%n.addr.08 = phi i32 [ %dec, %if.end ], [ %rem, %entry ]
- %0 = load i32, i32* %x.addr.09, align 4
+ %0 = load i32, ptr %x.addr.09, align 4
%cmp1 = icmp slt i32 %0, 10
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %while.body
- store i32 0, i32* %x.addr.09, align 4
+ store i32 0, ptr %x.addr.09, align 4
br label %if.end
if.end: ; preds = %if.then, %while.body
- %incdec.ptr = getelementptr inbounds i32, i32* %x.addr.09, i32 1
+ %incdec.ptr = getelementptr inbounds i32, ptr %x.addr.09, i32 1
%dec = add nsw i32 %n.addr.08, -1
%cmp = icmp sgt i32 %dec, 0
br i1 %cmp, label %while.body, label %while.end
for.body.i.i: ; preds = %for.body.i.i.preheader, %for.inc.i.3.i
%i.0137.i.i = phi i32 [ %add.i.3.i, %for.inc.i.3.i ], [ 0, %entry ]
%add.i.i = or i32 %i.0137.i.i, 1
- %arrayidx.i.i = getelementptr inbounds i32, i32* getelementptr inbounds ([50 x i32], [50 x i32]* @data, i32 0, i32 0), i32 %add.i.i
- %l93 = load i32, i32* %arrayidx.i.i, align 4
+ %arrayidx.i.i = getelementptr inbounds i32, ptr @data, i32 %add.i.i
+ %l93 = load i32, ptr %arrayidx.i.i, align 4
%cmp1.i.i = icmp sgt i32 %l93, %l86
br i1 %cmp1.i.i, label %land.lhs.true.i.i, label %for.inc.i.i
land.lhs.true.i.i: ; preds = %for.body.i.i
- %arrayidx2.i.i = getelementptr inbounds i32, i32* getelementptr inbounds ([50 x i32], [50 x i32]* @data, i32 0, i32 0), i32 %i.0137.i.i
- %l94 = load i32, i32* %arrayidx2.i.i, align 4
+ %arrayidx2.i.i = getelementptr inbounds i32, ptr @data, i32 %i.0137.i.i
+ %l94 = load i32, ptr %arrayidx2.i.i, align 4
%cmp3.not.i.i = icmp sgt i32 %l94, %l86
br i1 %cmp3.not.i.i, label %for.inc.i.i, label %for.end.i.if.end8.i_crit_edge.i.loopexit
for.body.i.1.i: ; preds = %for.inc.i.i
%add.i.1.i = or i32 %i.0137.i.i, 2
- %arrayidx.i.1.i = getelementptr inbounds i32, i32* getelementptr inbounds ([50 x i32], [50 x i32]* @data, i32 0, i32 0), i32 %add.i.1.i
- %l345 = load i32, i32* %arrayidx.i.1.i, align 4
+ %arrayidx.i.1.i = getelementptr inbounds i32, ptr @data, i32 %add.i.1.i
+ %l345 = load i32, ptr %arrayidx.i.1.i, align 4
%cmp1.i.1.i = icmp sgt i32 %l345, %l86
br i1 %cmp1.i.1.i, label %land.lhs.true.i.1.i, label %for.inc.i.1.i
for.inc.i.1.i: ; preds = %land.lhs.true.i.1.i, %for.body.i.1.i
%add.i.2.i = or i32 %i.0137.i.i, 3
- %arrayidx.i.2.i = getelementptr inbounds i32, i32* getelementptr inbounds ([50 x i32], [50 x i32]* @data, i32 0, i32 0), i32 %add.i.2.i
- %l346 = load i32, i32* %arrayidx.i.2.i, align 4
+ %arrayidx.i.2.i = getelementptr inbounds i32, ptr @data, i32 %add.i.2.i
+ %l346 = load i32, ptr %arrayidx.i.2.i, align 4
%cmp1.i.2.i = icmp sgt i32 %l346, %l86
br i1 %cmp1.i.2.i, label %land.lhs.true.i.2.i, label %for.inc.i.2.i
for.body.i.3.i: ; preds = %for.inc.i.2.i
%add.i.3.i = add nuw nsw i32 %i.0137.i.i, 4
- %arrayidx.i.3.i = getelementptr inbounds i32, i32* getelementptr inbounds ([50 x i32], [50 x i32]* @data, i32 0, i32 0), i32 %add.i.3.i
- %l347 = load i32, i32* %arrayidx.i.3.i, align 4
+ %arrayidx.i.3.i = getelementptr inbounds i32, ptr @data, i32 %add.i.3.i
+ %l347 = load i32, ptr %arrayidx.i.3.i, align 4
%cmp1.i.3.i = icmp sgt i32 %l347, %l86
br i1 %cmp1.i.3.i, label %land.lhs.true.i.3.i, label %for.inc.i.3.i
; CHECK-LABEL: multiple_liveouts
; CHECK: for.body
; CHECK: br i1 %cmp.not, label %for.cond.cleanup.loopexit, label %for.body
-define void @multiple_liveouts(i32* %x, i32* %y, i32* %d, i32 %n) {
+define void @multiple_liveouts(ptr %x, ptr %y, ptr %d, i32 %n) {
entry:
- %0 = load i32, i32* %d, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %d, i32 1
- %1 = load i32, i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %d, i32 2
- %2 = load i32, i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %d, i32 3
- %3 = load i32, i32* %arrayidx3, align 4
+ %0 = load i32, ptr %d, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %d, i32 1
+ %1 = load i32, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %d, i32 2
+ %2 = load i32, ptr %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %d, i32 3
+ %3 = load i32, ptr %arrayidx3, align 4
%cmp.not58 = icmp eq i32 %n, 0
br i1 %cmp.not58, label %for.cond.cleanup, label %for.body.preheader
br label %for.body
for.body: ; preds = %for.body.preheader, %for.body
- %x.addr.065 = phi i32* [ %incdec.ptr, %for.body ], [ %x, %for.body.preheader ]
- %y.addr.064 = phi i32* [ %incdec.ptr25, %for.body ], [ %y, %for.body.preheader ]
+ %x.addr.065 = phi ptr [ %incdec.ptr, %for.body ], [ %x, %for.body.preheader ]
+ %y.addr.064 = phi ptr [ %incdec.ptr25, %for.body ], [ %y, %for.body.preheader ]
%res00.063 = phi i32 [ %add, %for.body ], [ %0, %for.body.preheader ]
%rhs_cols_idx.062 = phi i32 [ %dec, %for.body ], [ %n, %for.body.preheader ]
%res11.061 = phi i32 [ %add24, %for.body ], [ %3, %for.body.preheader ]
%res10.060 = phi i32 [ %add20, %for.body ], [ %2, %for.body.preheader ]
%res01.059 = phi i32 [ %add14, %for.body ], [ %1, %for.body.preheader ]
- %4 = load i32, i32* %x.addr.065, align 4
- %arrayidx5 = getelementptr inbounds i32, i32* %x.addr.065, i32 %n
- %5 = load i32, i32* %arrayidx5, align 4
- %6 = load i32, i32* %y.addr.064, align 4
+ %4 = load i32, ptr %x.addr.065, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %x.addr.065, i32 %n
+ %5 = load i32, ptr %arrayidx5, align 4
+ %6 = load i32, ptr %y.addr.064, align 4
%conv9 = and i32 %6, 255
%conv10 = and i32 %4, 255
%mul = mul nuw nsw i32 %conv9, %conv10
%conv12 = and i32 %5, 255
%mul13 = mul nuw nsw i32 %conv9, %conv12
%add14 = add nsw i32 %mul13, %res01.059
- %arrayidx15 = getelementptr inbounds i32, i32* %y.addr.064, i32 %n
- %7 = load i32, i32* %arrayidx15, align 4
+ %arrayidx15 = getelementptr inbounds i32, ptr %y.addr.064, i32 %n
+ %7 = load i32, ptr %arrayidx15, align 4
%conv17 = and i32 %7, 255
%mul19 = mul nuw nsw i32 %conv17, %conv10
%add20 = add nsw i32 %mul19, %res10.060
%mul23 = mul nuw nsw i32 %conv17, %conv12
%add24 = add nsw i32 %mul23, %res11.061
- %incdec.ptr = getelementptr inbounds i32, i32* %x.addr.065, i32 1
- %incdec.ptr25 = getelementptr inbounds i32, i32* %y.addr.064, i32 1
+ %incdec.ptr = getelementptr inbounds i32, ptr %x.addr.065, i32 1
+ %incdec.ptr25 = getelementptr inbounds i32, ptr %y.addr.064, i32 1
%dec = add nsw i32 %rhs_cols_idx.062, -1
%cmp.not = icmp eq i32 %dec, 0
br i1 %cmp.not, label %for.cond.cleanup.loopexit, label %for.body
%res10.0 = phi i32 [ %2, %entry ], [ %add20.lcssa, %for.cond.cleanup.loopexit ]
%res11.0 = phi i32 [ %3, %entry ], [ %add24.lcssa, %for.cond.cleanup.loopexit ]
%res00.0 = phi i32 [ %0, %entry ], [ %add.lcssa, %for.cond.cleanup.loopexit ]
- store i32 %res00.0, i32* %d, align 4
- store i32 %res01.0, i32* %arrayidx1, align 4
- store i32 %res10.0, i32* %arrayidx2, align 4
- store i32 %res11.0, i32* %arrayidx3, align 4
+ store i32 %res00.0, ptr %d, align 4
+ store i32 %res01.0, ptr %arrayidx1, align 4
+ store i32 %res10.0, ptr %arrayidx2, align 4
+ store i32 %res11.0, ptr %arrayidx3, align 4
ret void
}
; CHECK-LABEL: multiple_liveouts_doubleexit
; CHECK: for.body
; CHECK: br i1 %cmp.not, label %cleanup22.loopexit2, label %for.body
-define void @multiple_liveouts_doubleexit(i32 %n, i32* %x, i32* %y, i32* %z) {
+define void @multiple_liveouts_doubleexit(i32 %n, ptr %x, ptr %y, ptr %z) {
entry:
%cmp.not55 = icmp eq i32 %n, 0
br i1 %cmp.not55, label %cleanup22, label %for.body.preheader
br label %for.body
for.body: ; preds = %for.body.preheader, %for.inc
- %x.addr.062 = phi i32* [ %incdec.ptr, %for.inc ], [ %x, %for.body.preheader ]
- %y.addr.061 = phi i32* [ %incdec.ptr19, %for.inc ], [ %y, %for.body.preheader ]
+ %x.addr.062 = phi ptr [ %incdec.ptr, %for.inc ], [ %x, %for.body.preheader ]
+ %y.addr.061 = phi ptr [ %incdec.ptr19, %for.inc ], [ %y, %for.body.preheader ]
%rhs_cols_idx.060 = phi i32 [ %dec, %for.inc ], [ %n, %for.body.preheader ]
%res11.059 = phi i32 [ %add18, %for.inc ], [ 0, %for.body.preheader ]
%res10.058 = phi i32 [ %add16, %for.inc ], [ 0, %for.body.preheader ]
%res01.057 = phi i32 [ %add8, %for.inc ], [ 0, %for.body.preheader ]
%res00.056 = phi i32 [ %add, %for.inc ], [ 0, %for.body.preheader ]
- %0 = load i32, i32* %x.addr.062, align 4
- %1 = load i32, i32* %y.addr.061, align 4
+ %0 = load i32, ptr %x.addr.062, align 4
+ %1 = load i32, ptr %y.addr.061, align 4
%conv5 = and i32 %1, 255
%conv6 = and i32 %0, 255
%mul = mul nuw nsw i32 %conv5, %conv6
br i1 %cmp9, label %cleanup22.loopexit, label %for.inc
for.inc: ; preds = %for.body
- %arrayidx11 = getelementptr inbounds i32, i32* %y.addr.061, i32 %n
- %2 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %y.addr.061, i32 %n
+ %2 = load i32, ptr %arrayidx11, align 4
%conv13 = and i32 %2, 255
%mul15 = mul nuw nsw i32 %conv13, %conv6
%add16 = add nuw nsw i32 %mul15, %res10.058
%add18 = add nuw nsw i32 %conv13, %res11.059
- %incdec.ptr = getelementptr inbounds i32, i32* %x.addr.062, i32 1
- %incdec.ptr19 = getelementptr inbounds i32, i32* %y.addr.061, i32 1
+ %incdec.ptr = getelementptr inbounds i32, ptr %x.addr.062, i32 1
+ %incdec.ptr19 = getelementptr inbounds i32, ptr %y.addr.061, i32 1
%dec = add nsw i32 %rhs_cols_idx.060, -1
%cmp.not = icmp eq i32 %dec, 0
br i1 %cmp.not, label %cleanup22.loopexit2, label %for.body
%res11.0.lcssa = phi i32 [ 0, %entry ], [ %res11.0.lcssa.ph, %cleanup22.loopexit ], [ %res11.0.lcssa.ph2, %cleanup22.loopexit2 ]
%res00.1 = phi i32 [ 0, %entry ], [ %add.lcssa, %cleanup22.loopexit ], [ %add.lcssa2, %cleanup22.loopexit2 ]
%res01.1 = phi i32 [ 0, %entry ], [ %add8.lcssa, %cleanup22.loopexit ], [ %add8.lcssa2, %cleanup22.loopexit2 ]
- store i32 %res00.1, i32* %z, align 4
- %arrayidx24 = getelementptr inbounds i32, i32* %z, i32 1
- store i32 %res01.1, i32* %arrayidx24, align 4
- %arrayidx25 = getelementptr inbounds i32, i32* %z, i32 2
- store i32 %res10.0.lcssa, i32* %arrayidx25, align 4
- %arrayidx26 = getelementptr inbounds i32, i32* %z, i32 3
- store i32 %res11.0.lcssa, i32* %arrayidx26, align 4
+ store i32 %res00.1, ptr %z, align 4
+ %arrayidx24 = getelementptr inbounds i32, ptr %z, i32 1
+ store i32 %res01.1, ptr %arrayidx24, align 4
+ %arrayidx25 = getelementptr inbounds i32, ptr %z, i32 2
+ store i32 %res10.0.lcssa, ptr %arrayidx25, align 4
+ %arrayidx26 = getelementptr inbounds i32, ptr %z, i32 3
+ store i32 %res11.0.lcssa, ptr %arrayidx26, align 4
ret void
}
; Function Attrs: noinline nounwind optnone uwtable
define void @foo() local_unnamed_addr #0 {
bb:
- %tmp = alloca [5 x i32*], align 16
+ %tmp = alloca [5 x ptr], align 16
br label %bb7.preheader
bb3.loopexit: ; preds = %bb10
bb10: ; preds = %bb10, %bb7.preheader
%indvars.iv = phi i64 [ 0, %bb7.preheader ], [ %indvars.iv.next, %bb10 ]
%tmp1.14 = phi i32 [ %tmp1.06, %bb7.preheader ], [ %spec.select, %bb10 ]
- %tmp13 = getelementptr inbounds [5 x i32*], [5 x i32*]* %tmp, i64 0, i64 %indvars.iv
- %tmp14 = load i32*, i32** %tmp13, align 8
- %tmp15.not = icmp ne i32* %tmp14, null
+ %tmp13 = getelementptr inbounds [5 x ptr], ptr %tmp, i64 0, i64 %indvars.iv
+ %tmp14 = load ptr, ptr %tmp13, align 8
+ %tmp15.not = icmp ne ptr %tmp14, null
%tmp18 = sext i1 %tmp15.not to i32
%spec.select = add nsw i32 %tmp1.14, %tmp18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: while.body.peel2
%struct.STREAM = type { %union.anon, i32, i32 }
-%union.anon = type { i32* }
+%union.anon = type { ptr }
-define void @function(%struct.STREAM* nocapture readonly %b) local_unnamed_addr {
+define void @function(ptr nocapture readonly %b) local_unnamed_addr {
entry:
- %bitPtr3 = getelementptr inbounds %struct.STREAM, %struct.STREAM* %b, i32 0, i32 2
- %0 = load i32, i32* %bitPtr3, align 4
+ %bitPtr3 = getelementptr inbounds %struct.STREAM, ptr %b, i32 0, i32 2
+ %0 = load i32, ptr %bitPtr3, align 4
%cmp11 = icmp ult i32 %0, 32
br i1 %cmp11, label %while.body.preheader, label %do.end
while.body.preheader:
- %value2 = getelementptr inbounds %struct.STREAM, %struct.STREAM* %b, i32 0, i32 1
- %1 = load i32, i32* %value2, align 4
- %w = getelementptr inbounds %struct.STREAM, %struct.STREAM* %b, i32 0, i32 0, i32 0
- %2 = load i32*, i32** %w, align 4
+ %value2 = getelementptr inbounds %struct.STREAM, ptr %b, i32 0, i32 1
+ %1 = load i32, ptr %value2, align 4
+ %2 = load ptr, ptr %b, align 4
br label %while.body
while.body:
%bitPtr.014 = phi i32 [ %add, %while.body ], [ %0, %while.body.preheader ]
%value.013 = phi i32 [ %shl, %while.body ], [ %1, %while.body.preheader ]
- %ptr.012 = phi i32* [ %incdec.ptr, %while.body ], [ %2, %while.body.preheader ]
+ %ptr.012 = phi ptr [ %incdec.ptr, %while.body ], [ %2, %while.body.preheader ]
%add = add nuw i32 %bitPtr.014, 8
%shr = lshr i32 %value.013, 24
- %incdec.ptr = getelementptr inbounds i32, i32* %ptr.012, i32 1
- store i32 %shr, i32* %ptr.012, align 4
+ %incdec.ptr = getelementptr inbounds i32, ptr %ptr.012, i32 1
+ store i32 %shr, ptr %ptr.012, align 4
%shl = shl i32 %value.013, 8
%cmp = icmp ult i32 %add, 17
br i1 %cmp, label %while.body, label %do.end
;; Check that we do emit expensive instructions to compute trip
;; counts when unrolling loops on the a2 (because we unroll a lot).
-define i32 @test(i64 %v12, i8* %array, i64* %loc) {
+define i32 @test(i64 %v12, ptr %array, ptr %loc) {
; CHECK-LABEL: @test(
; CHECK: udiv
entry:
- %step = load i64, i64* %loc, !range !0
+ %step = load i64, ptr %loc, !range !0
br label %loop
loop: ; preds = %entry, %loop
%k.015 = phi i64 [ %v15, %loop ], [ %v12, %entry ]
- %v14 = getelementptr inbounds i8, i8* %array, i64 %k.015
- store i8 0, i8* %v14
+ %v14 = getelementptr inbounds i8, ptr %array, i64 %k.015
+ store i8 0, ptr %v14
%v15 = add nuw nsw i64 %k.015, %step
%v16 = icmp slt i64 %v15, 8193
br i1 %v16, label %loop, label %loopexit
; RUN: opt < %s -S -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -passes=loop-unroll -unroll-runtime-epilog=true | FileCheck %s -check-prefix=EPILOG
; RUN: opt < %s -S -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -passes=loop-unroll -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
entry:
%cmp1 = icmp eq i32 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%0 = type <{ double }>
-define dso_local void @test(i32* %arg) #0 {
+define dso_local void @test(ptr %arg) #0 {
; CHECK-LABEL: @test(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB16:%.*]]
; CHECK: bb16:
-; CHECK-NEXT: [[I20:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: [[I20:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20]])
; CHECK-NEXT: [[I24_ELT:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_1:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_1:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_1:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_1]])
; CHECK-NEXT: [[I24_ELT_1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_1]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_1]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_1]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_1]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_1]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_1]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_2:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_2]])
; CHECK-NEXT: [[I24_ELT_2:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_2]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_2]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_2]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_2:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_2]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_2]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_3:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_2]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_3:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_3:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_3]])
; CHECK-NEXT: [[I24_ELT_3:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_3]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_3]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_3]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_3:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_3]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_3]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_4:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_3]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_4:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_4:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_4]])
; CHECK-NEXT: [[I24_ELT_4:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_4]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_4]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_4]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_4:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_4]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_4]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_5:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_4]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_5:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_5:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_5]])
; CHECK-NEXT: [[I24_ELT_5:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_5]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_5]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_5]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_5:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_5]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_5]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_6:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_5]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_6:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_6:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_6]])
; CHECK-NEXT: [[I24_ELT_6:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_6]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_6]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_6]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_6:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_6]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_6]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_7:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_6]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_7:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_7:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_7]])
; CHECK-NEXT: [[I24_ELT_7:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_7]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_7]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_7]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_7:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_7]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_7]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_8:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_7]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_8:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_8:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_8]])
; CHECK-NEXT: [[I24_ELT_8:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_8]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_8]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_8]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_8:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_8]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_8]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_9:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_8]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_9:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_9:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_9]])
; CHECK-NEXT: [[I24_ELT_9:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_9]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_9]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_9]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_9:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_9]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_9]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_10:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_9]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_10:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_10:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_10]])
; CHECK-NEXT: [[I24_ELT_10:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_10]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_10]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_10]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_10:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_10]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_10]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_11:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_10]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_11:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_11:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_11]])
; CHECK-NEXT: [[I24_ELT_11:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_11]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_11]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_11]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_11:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_11]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_11]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_12:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_11]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_12:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_12:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_12]])
; CHECK-NEXT: [[I24_ELT_12:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_12]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_12]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_12]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_12:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_12]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_12]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_13:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_12]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_13:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_13:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_13]])
; CHECK-NEXT: [[I24_ELT_13:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_13]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_13]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_13]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_13:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_13]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_13]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_14:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_13]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_14:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_14:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_14]])
; CHECK-NEXT: [[I24_ELT_14:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_14]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_14]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_14]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_14:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_14]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_14]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
-; CHECK-NEXT: [[I20_15:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* nonnull inttoptr (i64 -32 to i8*))
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_14]], ptr inttoptr (i64 64 to ptr), align 64
+; CHECK-NEXT: [[I20_15:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_15:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_15]])
; CHECK-NEXT: [[I24_ELT_15:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_15]], 0
-; CHECK-NEXT: store <16 x i8> [[I24_ELT_15]], <16 x i8>* inttoptr (i64 48 to <16 x i8>*), align 16
+; CHECK-NEXT: store <16 x i8> [[I24_ELT_15]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_15:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_15]], 1
-; CHECK-NEXT: store <16 x i8> [[I24_ELT1_15]], <16 x i8>* inttoptr (i64 64 to <16 x i8>*), align 64
+; CHECK-NEXT: store <16 x i8> [[I24_ELT1_15]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: br label [[BB16]], !llvm.loop [[LOOP0:![0-9]+]]
;
bb:
- %i = alloca i32*, align 8
- store i32* %arg, i32** %i, align 8
- %i1 = alloca [0 x %0]*, align 8
- %i2 = alloca double*, align 8
+ %i = alloca ptr, align 8
+ store ptr %arg, ptr %i, align 8
+ %i1 = alloca ptr, align 8
+ %i2 = alloca ptr, align 8
%i3 = alloca i32, align 4
%i4 = alloca i32, align 4
%i5 = alloca i64, align 8
%i6 = alloca i64, align 8
%i7 = alloca <256 x i1>, align 32
- %i8 = load i32*, i32** %i, align 8
- %i9 = load i32, i32* %i8, align 4
+ %i8 = load ptr, ptr %i, align 8
+ %i9 = load i32, ptr %i8, align 4
%i10 = sub nsw i32 %i9, 0
- store i32 %i10, i32* %i4, align 4
- %i11 = load i32, i32* %i4, align 4
+ store i32 %i10, ptr %i4, align 4
+ %i11 = load i32, ptr %i4, align 4
%i12 = ashr i32 %i11, 5
%i13 = sext i32 %i12 to i64
- %i14 = load i64, i64* %i6, align 8
+ %i14 = load i64, ptr %i6, align 8
%i15 = sub nsw i64 %i14, 1
br label %bb16
bb16: ; preds = %bb16, %bb
- %i17 = load i64, i64* %i5, align 8
+ %i17 = load i64, ptr %i5, align 8
%i18 = icmp sge i64 %i17, 1
- %i19 = getelementptr i8, i8* null, i64 -32
- %i20 = call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %i19)
- store <256 x i1> %i20, <256 x i1>* %i7, align 32
- %i21 = getelementptr inbounds i8, i8* null, i64 48
- %i22 = bitcast i8* %i21 to <2 x double>*
- %i23 = load <256 x i1>, <256 x i1>* %i7, align 32
+ %i19 = getelementptr i8, ptr null, i64 -32
+ %i20 = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i19)
+ store <256 x i1> %i20, ptr %i7, align 32
+ %i21 = getelementptr inbounds i8, ptr null, i64 48
+ %i23 = load <256 x i1>, ptr %i7, align 32
%i24 = call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %i23)
- %i25 = bitcast <2 x double>* %i22 to { <16 x i8>, <16 x i8> }*
- store { <16 x i8>, <16 x i8> } %i24, { <16 x i8>, <16 x i8> }* %i25, align 16
+ store { <16 x i8>, <16 x i8> } %i24, ptr %i21, align 16
br label %bb16, !llvm.loop !1
}
; Function Attrs: argmemonly nounwind readonly
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*) #1
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr) #1
; Function Attrs: nounwind readnone
declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>) #2
target triple = "powerpc64le-unknown-linux-gnu"
; Function Attrs: norecurse nounwind
-define i8* @f(i8* returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_addr #0 {
+define ptr @f(ptr returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_addr #0 {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[K:%.*]], 0
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <16 x i32> [[TMP5]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, i8* [[S:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP7]], <16 x i8>* [[TMP9]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: store <16 x i8> [[TMP7]], ptr [[TMP8]], align 1
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add nuw nsw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT13:%.*]] = add <16 x i32> [[VEC_IND12]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i64 [[NITER]], 1
; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i32> [[TMP10]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <16 x i32> [[TMP11]], zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP12]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDEX_NEXT]]
-; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP13]], <16 x i8>* [[TMP15]], align 1
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDEX_NEXT]]
+; CHECK-NEXT: store <16 x i8> [[TMP13]], ptr [[TMP14]], align 1
; CHECK-NEXT: [[INDEX_NEXT_1]] = add i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: [[VEC_IND_NEXT13_1]] = add <16 x i32> [[VEC_IND_NEXT13]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER_NEXT]], 1
; CHECK-NEXT: [[TMP17:%.*]] = and <16 x i32> [[TMP16]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq <16 x i32> [[TMP17]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = select <16 x i1> [[TMP18]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDEX_UNR]]
-; CHECK-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP19]], <16 x i8>* [[TMP21]], align 1
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDEX_UNR]]
+; CHECK-NEXT: store <16 x i8> [[TMP19]], ptr [[TMP20]], align 1
; CHECK-NEXT: br label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[AND_PROL:%.*]] = and i32 [[SHL_PROL]], [[X]]
; CHECK-NEXT: [[TOBOOL_PROL:%.*]] = icmp eq i32 [[AND_PROL]], 0
; CHECK-NEXT: [[CONV_PROL:%.*]] = select i1 [[TOBOOL_PROL]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_PROL]]
-; CHECK-NEXT: store i8 [[CONV_PROL]], i8* [[ARRAYIDX_PROL]], align 1
+; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_PROL]]
+; CHECK-NEXT: store i8 [[CONV_PROL]], ptr [[ARRAYIDX_PROL]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_PROL]] = add nuw nsw i64 [[INDVARS_IV_PROL]], 1
; CHECK-NEXT: [[EXITCOND_PROL:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_PROL]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TOBOOL]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i8 [[CONV]], i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 1, [[TMP28]]
; CHECK-NEXT: [[AND_1:%.*]] = and i32 [[SHL_1]], [[X]]
; CHECK-NEXT: [[TOBOOL_1:%.*]] = icmp eq i32 [[AND_1]], 0
; CHECK-NEXT: [[CONV_1:%.*]] = select i1 [[TOBOOL_1]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: store i8 [[CONV_1]], i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: store i8 [[CONV_1]], ptr [[ARRAYIDX_1]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; CHECK-NEXT: [[TMP29:%.*]] = trunc i64 [[INDVARS_IV_NEXT_1]] to i32
; CHECK-NEXT: [[SHL_2:%.*]] = shl i32 1, [[TMP29]]
; CHECK-NEXT: [[AND_2:%.*]] = and i32 [[SHL_2]], [[X]]
; CHECK-NEXT: [[TOBOOL_2:%.*]] = icmp eq i32 [[AND_2]], 0
; CHECK-NEXT: [[CONV_2:%.*]] = select i1 [[TOBOOL_2]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT: store i8 [[CONV_2]], i8* [[ARRAYIDX_2]], align 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: store i8 [[CONV_2]], ptr [[ARRAYIDX_2]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; CHECK-NEXT: [[TMP30:%.*]] = trunc i64 [[INDVARS_IV_NEXT_2]] to i32
; CHECK-NEXT: [[SHL_3:%.*]] = shl i32 1, [[TMP30]]
; CHECK-NEXT: [[AND_3:%.*]] = and i32 [[SHL_3]], [[X]]
; CHECK-NEXT: [[TOBOOL_3:%.*]] = icmp eq i32 [[AND_3]], 0
; CHECK-NEXT: [[CONV_3:%.*]] = select i1 [[TOBOOL_3]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT: store i8 [[CONV_3]], i8* [[ARRAYIDX_3]], align 1
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: store i8 [[CONV_3]], ptr [[ARRAYIDX_3]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; CHECK-NEXT: [[TMP31:%.*]] = trunc i64 [[INDVARS_IV_NEXT_3]] to i32
; CHECK-NEXT: [[SHL_4:%.*]] = shl i32 1, [[TMP31]]
; CHECK-NEXT: [[AND_4:%.*]] = and i32 [[SHL_4]], [[X]]
; CHECK-NEXT: [[TOBOOL_4:%.*]] = icmp eq i32 [[AND_4]], 0
; CHECK-NEXT: [[CONV_4:%.*]] = select i1 [[TOBOOL_4]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_3]]
-; CHECK-NEXT: store i8 [[CONV_4]], i8* [[ARRAYIDX_4]], align 1
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT: store i8 [[CONV_4]], ptr [[ARRAYIDX_4]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; CHECK-NEXT: [[TMP32:%.*]] = trunc i64 [[INDVARS_IV_NEXT_4]] to i32
; CHECK-NEXT: [[SHL_5:%.*]] = shl i32 1, [[TMP32]]
; CHECK-NEXT: [[AND_5:%.*]] = and i32 [[SHL_5]], [[X]]
; CHECK-NEXT: [[TOBOOL_5:%.*]] = icmp eq i32 [[AND_5]], 0
; CHECK-NEXT: [[CONV_5:%.*]] = select i1 [[TOBOOL_5]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_4]]
-; CHECK-NEXT: store i8 [[CONV_5]], i8* [[ARRAYIDX_5]], align 1
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT: store i8 [[CONV_5]], ptr [[ARRAYIDX_5]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; CHECK-NEXT: [[TMP33:%.*]] = trunc i64 [[INDVARS_IV_NEXT_5]] to i32
; CHECK-NEXT: [[SHL_6:%.*]] = shl i32 1, [[TMP33]]
; CHECK-NEXT: [[AND_6:%.*]] = and i32 [[SHL_6]], [[X]]
; CHECK-NEXT: [[TOBOOL_6:%.*]] = icmp eq i32 [[AND_6]], 0
; CHECK-NEXT: [[CONV_6:%.*]] = select i1 [[TOBOOL_6]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_5]]
-; CHECK-NEXT: store i8 [[CONV_6]], i8* [[ARRAYIDX_6]], align 1
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT: store i8 [[CONV_6]], ptr [[ARRAYIDX_6]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; CHECK-NEXT: [[TMP34:%.*]] = trunc i64 [[INDVARS_IV_NEXT_6]] to i32
; CHECK-NEXT: [[SHL_7:%.*]] = shl i32 1, [[TMP34]]
; CHECK-NEXT: [[AND_7:%.*]] = and i32 [[SHL_7]], [[X]]
; CHECK-NEXT: [[TOBOOL_7:%.*]] = icmp eq i32 [[AND_7]], 0
; CHECK-NEXT: [[CONV_7:%.*]] = select i1 [[TOBOOL_7]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_6]]
-; CHECK-NEXT: store i8 [[CONV_7]], i8* [[ARRAYIDX_7]], align 1
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT: store i8 [[CONV_7]], ptr [[ARRAYIDX_7]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV_NEXT_6]], 1
; CHECK-NEXT: [[EXITCOND_7:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_7]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[EXITCOND_7]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY]]
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[IDXPROM1:%.*]] = sext i32 [[K]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[IDXPROM1]]
-; CHECK-NEXT: store i8 0, i8* [[ARRAYIDX2]], align 1
-; CHECK-NEXT: ret i8* [[S]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[IDXPROM1]]
+; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: ret ptr [[S]]
;
entry:
%cmp10 = icmp sgt i32 %k, 0
%1 = and <16 x i32> %0, %broadcast.splat
%2 = icmp eq <16 x i32> %1, zeroinitializer
%3 = select <16 x i1> %2, <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
- %4 = getelementptr inbounds i8, i8* %s, i64 %index
- %5 = bitcast i8* %4 to <16 x i8>*
- store <16 x i8> %3, <16 x i8>* %5, align 1
+ %4 = getelementptr inbounds i8, ptr %s, i64 %index
+ store <16 x i8> %3, ptr %4, align 1
%index.next = add i64 %index, 16
%vec.ind.next13 = add <16 x i32> %vec.ind12, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
- %6 = icmp eq i64 %index.next, %n.vec
- br i1 %6, label %middle.block, label %vector.body
+ %5 = icmp eq i64 %index.next, %n.vec
+ br i1 %5, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%cmp.n = icmp eq i64 %n.vec, %wide.trip.count
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
- %7 = trunc i64 %indvars.iv to i32
- %shl = shl i32 1, %7
+ %6 = trunc i64 %indvars.iv to i32
+ %shl = shl i32 1, %6
%and = and i32 %shl, %x
%tobool = icmp eq i32 %and, 0
%conv = select i1 %tobool, i8 48, i8 49
- %arrayidx = getelementptr inbounds i8, i8* %s, i64 %indvars.iv
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %s, i64 %indvars.iv
+ store i8 %conv, ptr %arrayidx, align 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body, %middle.block, %entry
%idxprom1 = sext i32 %k to i64
- %arrayidx2 = getelementptr inbounds i8, i8* %s, i64 %idxprom1
- store i8 0, i8* %arrayidx2, align 1
- ret i8* %s
+ %arrayidx2 = getelementptr inbounds i8, ptr %s, i64 %idxprom1
+ store i8 0, ptr %arrayidx2, align 1
+ ret ptr %s
}
target triple = "powerpc64le-unknown-linux-gnu"
; Function Attrs: norecurse nounwind
-define i8* @f(i8* returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_addr #0 {
+define ptr @f(ptr returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_addr #0 {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[K:%.*]], 0
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <16 x i32> [[TMP5]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, i8* [[S:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP7]], <16 x i8>* [[TMP9]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: store <16 x i8> [[TMP7]], ptr [[TMP8]], align 1
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add nuw nsw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT13:%.*]] = add <16 x i32> [[VEC_IND12]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i64 [[NITER]], 1
; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i32> [[TMP10]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <16 x i32> [[TMP11]], zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP12]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDEX_NEXT]]
-; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP13]], <16 x i8>* [[TMP15]], align 1
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDEX_NEXT]]
+; CHECK-NEXT: store <16 x i8> [[TMP13]], ptr [[TMP14]], align 1
; CHECK-NEXT: [[INDEX_NEXT_1]] = add i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: [[VEC_IND_NEXT13_1]] = add <16 x i32> [[VEC_IND_NEXT13]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER_NEXT]], 1
; CHECK-NEXT: [[TMP17:%.*]] = and <16 x i32> [[TMP16]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq <16 x i32> [[TMP17]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = select <16 x i1> [[TMP18]], <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDEX_UNR]]
-; CHECK-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to <16 x i8>*
-; CHECK-NEXT: store <16 x i8> [[TMP19]], <16 x i8>* [[TMP21]], align 1
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDEX_UNR]]
+; CHECK-NEXT: store <16 x i8> [[TMP19]], ptr [[TMP20]], align 1
; CHECK-NEXT: br label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[AND_PROL:%.*]] = and i32 [[SHL_PROL]], [[X]]
; CHECK-NEXT: [[TOBOOL_PROL:%.*]] = icmp eq i32 [[AND_PROL]], 0
; CHECK-NEXT: [[CONV_PROL:%.*]] = select i1 [[TOBOOL_PROL]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_PROL]]
-; CHECK-NEXT: store i8 [[CONV_PROL]], i8* [[ARRAYIDX_PROL]], align 1
+; CHECK-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_PROL]]
+; CHECK-NEXT: store i8 [[CONV_PROL]], ptr [[ARRAYIDX_PROL]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_PROL]] = add nuw nsw i64 [[INDVARS_IV_PROL]], 1
; CHECK-NEXT: [[EXITCOND_PROL:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_PROL]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TOBOOL]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i8 [[CONV]], i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 1, [[TMP28]]
; CHECK-NEXT: [[AND_1:%.*]] = and i32 [[SHL_1]], [[X]]
; CHECK-NEXT: [[TOBOOL_1:%.*]] = icmp eq i32 [[AND_1]], 0
; CHECK-NEXT: [[CONV_1:%.*]] = select i1 [[TOBOOL_1]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: store i8 [[CONV_1]], i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: store i8 [[CONV_1]], ptr [[ARRAYIDX_1]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; CHECK-NEXT: [[TMP29:%.*]] = trunc i64 [[INDVARS_IV_NEXT_1]] to i32
; CHECK-NEXT: [[SHL_2:%.*]] = shl i32 1, [[TMP29]]
; CHECK-NEXT: [[AND_2:%.*]] = and i32 [[SHL_2]], [[X]]
; CHECK-NEXT: [[TOBOOL_2:%.*]] = icmp eq i32 [[AND_2]], 0
; CHECK-NEXT: [[CONV_2:%.*]] = select i1 [[TOBOOL_2]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT: store i8 [[CONV_2]], i8* [[ARRAYIDX_2]], align 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: store i8 [[CONV_2]], ptr [[ARRAYIDX_2]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; CHECK-NEXT: [[TMP30:%.*]] = trunc i64 [[INDVARS_IV_NEXT_2]] to i32
; CHECK-NEXT: [[SHL_3:%.*]] = shl i32 1, [[TMP30]]
; CHECK-NEXT: [[AND_3:%.*]] = and i32 [[SHL_3]], [[X]]
; CHECK-NEXT: [[TOBOOL_3:%.*]] = icmp eq i32 [[AND_3]], 0
; CHECK-NEXT: [[CONV_3:%.*]] = select i1 [[TOBOOL_3]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT: store i8 [[CONV_3]], i8* [[ARRAYIDX_3]], align 1
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: store i8 [[CONV_3]], ptr [[ARRAYIDX_3]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; CHECK-NEXT: [[TMP31:%.*]] = trunc i64 [[INDVARS_IV_NEXT_3]] to i32
; CHECK-NEXT: [[SHL_4:%.*]] = shl i32 1, [[TMP31]]
; CHECK-NEXT: [[AND_4:%.*]] = and i32 [[SHL_4]], [[X]]
; CHECK-NEXT: [[TOBOOL_4:%.*]] = icmp eq i32 [[AND_4]], 0
; CHECK-NEXT: [[CONV_4:%.*]] = select i1 [[TOBOOL_4]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_3]]
-; CHECK-NEXT: store i8 [[CONV_4]], i8* [[ARRAYIDX_4]], align 1
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT: store i8 [[CONV_4]], ptr [[ARRAYIDX_4]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; CHECK-NEXT: [[TMP32:%.*]] = trunc i64 [[INDVARS_IV_NEXT_4]] to i32
; CHECK-NEXT: [[SHL_5:%.*]] = shl i32 1, [[TMP32]]
; CHECK-NEXT: [[AND_5:%.*]] = and i32 [[SHL_5]], [[X]]
; CHECK-NEXT: [[TOBOOL_5:%.*]] = icmp eq i32 [[AND_5]], 0
; CHECK-NEXT: [[CONV_5:%.*]] = select i1 [[TOBOOL_5]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_4]]
-; CHECK-NEXT: store i8 [[CONV_5]], i8* [[ARRAYIDX_5]], align 1
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT: store i8 [[CONV_5]], ptr [[ARRAYIDX_5]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; CHECK-NEXT: [[TMP33:%.*]] = trunc i64 [[INDVARS_IV_NEXT_5]] to i32
; CHECK-NEXT: [[SHL_6:%.*]] = shl i32 1, [[TMP33]]
; CHECK-NEXT: [[AND_6:%.*]] = and i32 [[SHL_6]], [[X]]
; CHECK-NEXT: [[TOBOOL_6:%.*]] = icmp eq i32 [[AND_6]], 0
; CHECK-NEXT: [[CONV_6:%.*]] = select i1 [[TOBOOL_6]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_5]]
-; CHECK-NEXT: store i8 [[CONV_6]], i8* [[ARRAYIDX_6]], align 1
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT: store i8 [[CONV_6]], ptr [[ARRAYIDX_6]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; CHECK-NEXT: [[TMP34:%.*]] = trunc i64 [[INDVARS_IV_NEXT_6]] to i32
; CHECK-NEXT: [[SHL_7:%.*]] = shl i32 1, [[TMP34]]
; CHECK-NEXT: [[AND_7:%.*]] = and i32 [[SHL_7]], [[X]]
; CHECK-NEXT: [[TOBOOL_7:%.*]] = icmp eq i32 [[AND_7]], 0
; CHECK-NEXT: [[CONV_7:%.*]] = select i1 [[TOBOOL_7]], i8 48, i8 49
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[INDVARS_IV_NEXT_6]]
-; CHECK-NEXT: store i8 [[CONV_7]], i8* [[ARRAYIDX_7]], align 1
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT: store i8 [[CONV_7]], ptr [[ARRAYIDX_7]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV_NEXT_6]], 1
; CHECK-NEXT: [[EXITCOND_7:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_7]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[EXITCOND_7]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY]]
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[IDXPROM1:%.*]] = sext i32 [[K]] to i64
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[S]], i64 [[IDXPROM1]]
-; CHECK-NEXT: store i8 0, i8* [[ARRAYIDX2]], align 1
-; CHECK-NEXT: ret i8* [[S]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[S]], i64 [[IDXPROM1]]
+; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: ret ptr [[S]]
;
entry:
%cmp10 = icmp sgt i32 %k, 0
%1 = and <16 x i32> %0, %broadcast.splat
%2 = icmp eq <16 x i32> %1, zeroinitializer
%3 = select <16 x i1> %2, <16 x i8> <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>, <16 x i8> <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
- %4 = getelementptr inbounds i8, i8* %s, i64 %index
- %5 = bitcast i8* %4 to <16 x i8>*
- store <16 x i8> %3, <16 x i8>* %5, align 1
+ %4 = getelementptr inbounds i8, ptr %s, i64 %index
+ store <16 x i8> %3, ptr %4, align 1
%index.next = add i64 %index, 16
%vec.ind.next13 = add <16 x i32> %vec.ind12, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
- %6 = icmp eq i64 %index.next, %n.vec
- br i1 %6, label %middle.block, label %vector.body
+ %5 = icmp eq i64 %index.next, %n.vec
+ br i1 %5, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%cmp.n = icmp eq i64 %n.vec, %wide.trip.count
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
- %7 = trunc i64 %indvars.iv to i32
- %shl = shl i32 1, %7
+ %6 = trunc i64 %indvars.iv to i32
+ %shl = shl i32 1, %6
%and = and i32 %shl, %x
%tobool = icmp eq i32 %and, 0
%conv = select i1 %tobool, i8 48, i8 49
- %arrayidx = getelementptr inbounds i8, i8* %s, i64 %indvars.iv
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %s, i64 %indvars.iv
+ store i8 %conv, ptr %arrayidx, align 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body, %middle.block, %entry
%idxprom1 = sext i32 %k to i64
- %arrayidx2 = getelementptr inbounds i8, i8* %s, i64 %idxprom1
- store i8 0, i8* %arrayidx2, align 1
- ret i8* %s
+ %arrayidx2 = getelementptr inbounds i8, ptr %s, i64 %idxprom1
+ store i8 0, ptr %arrayidx2, align 1
+ ret ptr %s
}
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64-unknown-unknown"
; Scalable-vector test: each iteration loads a <vscale x 1 x i8> value from %p,
; doubles it (add to itself), and stores it back, for i = 0..10 (signed slt).
; The CHECK lines match a single, un-unrolled loop body -- the function name
; suggests unrolling is not valid/attempted for scalable vector types here;
; TODO confirm against the loop-unroll pass behavior.
; The paired -/+ lines are the typed-pointer -> opaque "ptr" migration of both
; the IR and its autogenerated FileCheck lines; test semantics are unchanged.
; NOTE(review): this fragment appears truncated -- the for.end block and the
; function's closing brace are not visible in this chunk; code kept byte-identical.
-define void @invalid(<vscale x 1 x i8>* %p) nounwind ssp {
+define void @invalid(ptr %p) nounwind ssp {
; CHECK-LABEL: @invalid(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[A:%.*]] = load <vscale x 1 x i8>, <vscale x 1 x i8>* [[P:%.*]], align 1
+; CHECK-NEXT:    [[A:%.*]] = load <vscale x 1 x i8>, ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[B:%.*]] = add <vscale x 1 x i8> [[A]], [[A]]
-; CHECK-NEXT:    store <vscale x 1 x i8> [[B]], <vscale x 1 x i8>* [[P]], align 1
+; CHECK-NEXT:    store <vscale x 1 x i8> [[B]], ptr [[P]], align 1
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 10
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; Input IR (the loop the checks above were generated from):
for.body: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %p
+ %a = load <vscale x 1 x i8>, ptr %p
%b = add <vscale x 1 x i8> %a, %a
- store <vscale x 1 x i8> %b, <vscale x 1 x i8>* %p
+ store <vscale x 1 x i8> %b, ptr %p
%inc = add nsw i32 %i.0, 1
%cmp = icmp slt i32 %i.0, 10
br i1 %cmp, label %for.body, label %for.end
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -mtriple=riscv64 -passes=loop-unroll -mcpu=sifive-s76 | FileCheck %s
; SAXPY kernel test for RISC-V (sifive-s76 per the RUN line above):
; y[i] += a * x[i] over a constant 64-iteration loop.  The CHECK lines expect
; loop-unroll to unroll by a factor of 16: the induction variable advances 15
; extra times in-body (INDVARS_IV_NEXT .. INDVARS_IV_NEXT_15) and the exit
; compare is against 64.  The paired -/+ lines are the typed-pointer (float*)
; -> opaque "ptr" migration of both the IR and its autogenerated FileCheck
; lines; test semantics are unchanged.
; NOTE(review): this fragment appears truncated -- the exit_loop block and the
; function's closing brace are not visible in this chunk; code kept byte-identical.
-define dso_local void @saxpy(float %a, float* %x, float* %y) {
+define dso_local void @saxpy(float %a, ptr %x, ptr %y) {
; CHECK-LABEL: @saxpy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_15:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP0]], [[A:%.*]]
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[Y:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[Y:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[MUL]], [[TMP1]]
-; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    store float [[ADD]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT:    [[MUL_1:%.*]] = fmul fast float [[TMP2]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[ADD_1:%.*]] = fadd fast float [[MUL_1]], [[TMP3]]
-; CHECK-NEXT:    store float [[ADD_1]], float* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT:    store float [[ADD_1]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT:    [[MUL_2:%.*]] = fmul fast float [[TMP4]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
-; CHECK-NEXT:    store float [[ADD_2]], float* [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT:    store float [[ADD_2]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load float, float* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT:    [[MUL_3:%.*]] = fmul fast float [[TMP6]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT:    [[TMP7:%.*]] = load float, float* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = fadd fast float [[MUL_3]], [[TMP7]]
-; CHECK-NEXT:    store float [[ADD_3]], float* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT:    store float [[ADD_3]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
-; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_3]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load float, float* [[ARRAYIDX_4]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT:    [[MUL_4:%.*]] = fmul fast float [[TMP8]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_4:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_3]]
-; CHECK-NEXT:    [[TMP9:%.*]] = load float, float* [[ARRAYIDX2_4]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_4:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT:    [[ADD_4:%.*]] = fadd fast float [[MUL_4]], [[TMP9]]
-; CHECK-NEXT:    store float [[ADD_4]], float* [[ARRAYIDX2_4]], align 4
+; CHECK-NEXT:    store float [[ADD_4]], ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
-; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_4]]
-; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX_5]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT:    [[MUL_5:%.*]] = fmul fast float [[TMP10]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_5:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_4]]
-; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX2_5]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_5:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT:    [[ADD_5:%.*]] = fadd fast float [[MUL_5]], [[TMP11]]
-; CHECK-NEXT:    store float [[ADD_5]], float* [[ARRAYIDX2_5]], align 4
+; CHECK-NEXT:    store float [[ADD_5]], ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
-; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_5]]
-; CHECK-NEXT:    [[TMP12:%.*]] = load float, float* [[ARRAYIDX_6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT:    [[TMP12:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT:    [[MUL_6:%.*]] = fmul fast float [[TMP12]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_6:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_5]]
-; CHECK-NEXT:    [[TMP13:%.*]] = load float, float* [[ARRAYIDX2_6]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_6:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT:    [[ADD_6:%.*]] = fadd fast float [[MUL_6]], [[TMP13]]
-; CHECK-NEXT:    store float [[ADD_6]], float* [[ARRAYIDX2_6]], align 4
+; CHECK-NEXT:    store float [[ADD_6]], ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
-; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_6]]
-; CHECK-NEXT:    [[TMP14:%.*]] = load float, float* [[ARRAYIDX_7]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT:    [[MUL_7:%.*]] = fmul fast float [[TMP14]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_7:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_6]]
-; CHECK-NEXT:    [[TMP15:%.*]] = load float, float* [[ARRAYIDX2_7]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_7:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT:    [[ADD_7:%.*]] = fadd fast float [[MUL_7]], [[TMP15]]
-; CHECK-NEXT:    store float [[ADD_7]], float* [[ARRAYIDX2_7]], align 4
+; CHECK-NEXT:    store float [[ADD_7]], ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_7:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_6]], 1
-; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_7]]
-; CHECK-NEXT:    [[TMP16:%.*]] = load float, float* [[ARRAYIDX_8]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_7]]
+; CHECK-NEXT:    [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_8]], align 4
; CHECK-NEXT:    [[MUL_8:%.*]] = fmul fast float [[TMP16]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_8:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_7]]
-; CHECK-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX2_8]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_8:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_7]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT:    [[ADD_8:%.*]] = fadd fast float [[MUL_8]], [[TMP17]]
-; CHECK-NEXT:    store float [[ADD_8]], float* [[ARRAYIDX2_8]], align 4
+; CHECK-NEXT:    store float [[ADD_8]], ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_8:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_7]], 1
-; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_8]]
-; CHECK-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX_9]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_8]]
+; CHECK-NEXT:    [[TMP18:%.*]] = load float, ptr [[ARRAYIDX_9]], align 4
; CHECK-NEXT:    [[MUL_9:%.*]] = fmul fast float [[TMP18]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_9:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_8]]
-; CHECK-NEXT:    [[TMP19:%.*]] = load float, float* [[ARRAYIDX2_9]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_9:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_8]]
+; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT:    [[ADD_9:%.*]] = fadd fast float [[MUL_9]], [[TMP19]]
-; CHECK-NEXT:    store float [[ADD_9]], float* [[ARRAYIDX2_9]], align 4
+; CHECK-NEXT:    store float [[ADD_9]], ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_9:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_8]], 1
-; CHECK-NEXT:    [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_9]]
-; CHECK-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX_10]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_9]]
+; CHECK-NEXT:    [[TMP20:%.*]] = load float, ptr [[ARRAYIDX_10]], align 4
; CHECK-NEXT:    [[MUL_10:%.*]] = fmul fast float [[TMP20]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_10:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_9]]
-; CHECK-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX2_10]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_10:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_9]]
+; CHECK-NEXT:    [[TMP21:%.*]] = load float, ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT:    [[ADD_10:%.*]] = fadd fast float [[MUL_10]], [[TMP21]]
-; CHECK-NEXT:    store float [[ADD_10]], float* [[ARRAYIDX2_10]], align 4
+; CHECK-NEXT:    store float [[ADD_10]], ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_10:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_9]], 1
-; CHECK-NEXT:    [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_10]]
-; CHECK-NEXT:    [[TMP22:%.*]] = load float, float* [[ARRAYIDX_11]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_10]]
+; CHECK-NEXT:    [[TMP22:%.*]] = load float, ptr [[ARRAYIDX_11]], align 4
; CHECK-NEXT:    [[MUL_11:%.*]] = fmul fast float [[TMP22]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_11:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_10]]
-; CHECK-NEXT:    [[TMP23:%.*]] = load float, float* [[ARRAYIDX2_11]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_11:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_10]]
+; CHECK-NEXT:    [[TMP23:%.*]] = load float, ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT:    [[ADD_11:%.*]] = fadd fast float [[MUL_11]], [[TMP23]]
-; CHECK-NEXT:    store float [[ADD_11]], float* [[ARRAYIDX2_11]], align 4
+; CHECK-NEXT:    store float [[ADD_11]], ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_11:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_10]], 1
-; CHECK-NEXT:    [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_11]]
-; CHECK-NEXT:    [[TMP24:%.*]] = load float, float* [[ARRAYIDX_12]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_11]]
+; CHECK-NEXT:    [[TMP24:%.*]] = load float, ptr [[ARRAYIDX_12]], align 4
; CHECK-NEXT:    [[MUL_12:%.*]] = fmul fast float [[TMP24]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_12:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_11]]
-; CHECK-NEXT:    [[TMP25:%.*]] = load float, float* [[ARRAYIDX2_12]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_12:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_11]]
+; CHECK-NEXT:    [[TMP25:%.*]] = load float, ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT:    [[ADD_12:%.*]] = fadd fast float [[MUL_12]], [[TMP25]]
-; CHECK-NEXT:    store float [[ADD_12]], float* [[ARRAYIDX2_12]], align 4
+; CHECK-NEXT:    store float [[ADD_12]], ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_12:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_11]], 1
-; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_12]]
-; CHECK-NEXT:    [[TMP26:%.*]] = load float, float* [[ARRAYIDX_13]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_12]]
+; CHECK-NEXT:    [[TMP26:%.*]] = load float, ptr [[ARRAYIDX_13]], align 4
; CHECK-NEXT:    [[MUL_13:%.*]] = fmul fast float [[TMP26]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_13:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_12]]
-; CHECK-NEXT:    [[TMP27:%.*]] = load float, float* [[ARRAYIDX2_13]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_13:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_12]]
+; CHECK-NEXT:    [[TMP27:%.*]] = load float, ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT:    [[ADD_13:%.*]] = fadd fast float [[MUL_13]], [[TMP27]]
-; CHECK-NEXT:    store float [[ADD_13]], float* [[ARRAYIDX2_13]], align 4
+; CHECK-NEXT:    store float [[ADD_13]], ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_13:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_12]], 1
-; CHECK-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_13]]
-; CHECK-NEXT:    [[TMP28:%.*]] = load float, float* [[ARRAYIDX_14]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_13]]
+; CHECK-NEXT:    [[TMP28:%.*]] = load float, ptr [[ARRAYIDX_14]], align 4
; CHECK-NEXT:    [[MUL_14:%.*]] = fmul fast float [[TMP28]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_14:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_13]]
-; CHECK-NEXT:    [[TMP29:%.*]] = load float, float* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_14:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_13]]
+; CHECK-NEXT:    [[TMP29:%.*]] = load float, ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT:    [[ADD_14:%.*]] = fadd fast float [[MUL_14]], [[TMP29]]
-; CHECK-NEXT:    store float [[ADD_14]], float* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    store float [[ADD_14]], ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_14:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_13]], 1
-; CHECK-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_14]]
-; CHECK-NEXT:    [[TMP30:%.*]] = load float, float* [[ARRAYIDX_15]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_14]]
+; CHECK-NEXT:    [[TMP30:%.*]] = load float, ptr [[ARRAYIDX_15]], align 4
; CHECK-NEXT:    [[MUL_15:%.*]] = fmul fast float [[TMP30]], [[A]]
-; CHECK-NEXT:    [[ARRAYIDX2_15:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_14]]
-; CHECK-NEXT:    [[TMP31:%.*]] = load float, float* [[ARRAYIDX2_15]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_15:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_14]]
+; CHECK-NEXT:    [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT:    [[ADD_15:%.*]] = fadd fast float [[MUL_15]], [[TMP31]]
-; CHECK-NEXT:    store float [[ADD_15]], float* [[ARRAYIDX2_15]], align 4
+; CHECK-NEXT:    store float [[ADD_15]], ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT_15]] = add nuw nsw i64 [[INDVARS_IV_NEXT_14]], 1
; CHECK-NEXT:    [[EXITCOND_NOT_15:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_15]], 64
; CHECK-NEXT:    br i1 [[EXITCOND_NOT_15]], label [[EXIT_LOOP:%.*]], label [[FOR_BODY]]
; Input IR loop body (pre-unroll; the 16 copies above are generated from this):
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
%mul = fmul fast float %0, %a
- %arrayidx2 = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %y, i64 %indvars.iv
+ %1 = load float, ptr %arrayidx2, align 4
%add = fadd fast float %mul, %1
- store float %add, float* %arrayidx2, align 4
+ store float %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond.not = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond.not, label %exit_loop, label %for.body
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
; Full-unroll test: the input loop copies b[i] -> a[i] for a constant trip
; count of 10 (see the icmp against 10 below), and the CHECK lines expect
; loop-unroll to emit straight-line loads/stores for indices 0..9 with no
; remaining branch.  The paired -/+ lines are the typed-pointer (i8*) ->
; opaque "ptr" migration of both the IR and its autogenerated FileCheck
; lines; the semantics of the test are unchanged.
-define hidden void @compile_time_full(i8* nocapture %a, i8* nocapture readonly %b) {
+define hidden void @compile_time_full(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: @compile_time_full(
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[I:%.*]] = load i8, i8* [[B:%.*]], align 1
-; CHECK-NEXT:    store i8 [[I]], i8* [[A:%.*]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 1
-; CHECK-NEXT:    [[I_1:%.*]] = load i8, i8* [[ARRAYIDX_1]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_1:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 1
-; CHECK-NEXT:    store i8 [[I_1]], i8* [[ARRAYIDX1_1]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 2
-; CHECK-NEXT:    [[I_2:%.*]] = load i8, i8* [[ARRAYIDX_2]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_2:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 2
-; CHECK-NEXT:    store i8 [[I_2]], i8* [[ARRAYIDX1_2]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 3
-; CHECK-NEXT:    [[I_3:%.*]] = load i8, i8* [[ARRAYIDX_3]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_3:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 3
-; CHECK-NEXT:    store i8 [[I_3]], i8* [[ARRAYIDX1_3]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 4
-; CHECK-NEXT:    [[I_4:%.*]] = load i8, i8* [[ARRAYIDX_4]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_4:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 4
-; CHECK-NEXT:    store i8 [[I_4]], i8* [[ARRAYIDX1_4]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 5
-; CHECK-NEXT:    [[I_5:%.*]] = load i8, i8* [[ARRAYIDX_5]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_5:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 5
-; CHECK-NEXT:    store i8 [[I_5]], i8* [[ARRAYIDX1_5]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 6
-; CHECK-NEXT:    [[I_6:%.*]] = load i8, i8* [[ARRAYIDX_6]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_6:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 6
-; CHECK-NEXT:    store i8 [[I_6]], i8* [[ARRAYIDX1_6]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 7
-; CHECK-NEXT:    [[I_7:%.*]] = load i8, i8* [[ARRAYIDX_7]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_7:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 7
-; CHECK-NEXT:    store i8 [[I_7]], i8* [[ARRAYIDX1_7]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 8
-; CHECK-NEXT:    [[I_8:%.*]] = load i8, i8* [[ARRAYIDX_8]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_8:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 8
-; CHECK-NEXT:    store i8 [[I_8]], i8* [[ARRAYIDX1_8]], align 1
-; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 9
-; CHECK-NEXT:    [[I_9:%.*]] = load i8, i8* [[ARRAYIDX_9]], align 1
-; CHECK-NEXT:    [[ARRAYIDX1_9:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 9
-; CHECK-NEXT:    store i8 [[I_9]], i8* [[ARRAYIDX1_9]], align 1
+; CHECK-NEXT:    [[I:%.*]] = load i8, ptr [[B:%.*]], align 1
+; CHECK-NEXT:    store i8 [[I]], ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 1
+; CHECK-NEXT:    [[I_1:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 1
+; CHECK-NEXT:    store i8 [[I_1]], ptr [[ARRAYIDX1_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 2
+; CHECK-NEXT:    [[I_2:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 2
+; CHECK-NEXT:    store i8 [[I_2]], ptr [[ARRAYIDX1_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 3
+; CHECK-NEXT:    [[I_3:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 3
+; CHECK-NEXT:    store i8 [[I_3]], ptr [[ARRAYIDX1_3]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 4
+; CHECK-NEXT:    [[I_4:%.*]] = load i8, ptr [[ARRAYIDX_4]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_4:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 4
+; CHECK-NEXT:    store i8 [[I_4]], ptr [[ARRAYIDX1_4]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 5
+; CHECK-NEXT:    [[I_5:%.*]] = load i8, ptr [[ARRAYIDX_5]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_5:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 5
+; CHECK-NEXT:    store i8 [[I_5]], ptr [[ARRAYIDX1_5]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 6
+; CHECK-NEXT:    [[I_6:%.*]] = load i8, ptr [[ARRAYIDX_6]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_6:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 6
+; CHECK-NEXT:    store i8 [[I_6]], ptr [[ARRAYIDX1_6]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 7
+; CHECK-NEXT:    [[I_7:%.*]] = load i8, ptr [[ARRAYIDX_7]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_7:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 7
+; CHECK-NEXT:    store i8 [[I_7]], ptr [[ARRAYIDX1_7]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 8
+; CHECK-NEXT:    [[I_8:%.*]] = load i8, ptr [[ARRAYIDX_8]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_8:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 8
+; CHECK-NEXT:    store i8 [[I_8]], ptr [[ARRAYIDX1_8]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 9
+; CHECK-NEXT:    [[I_9:%.*]] = load i8, ptr [[ARRAYIDX_9]], align 1
+; CHECK-NEXT:    [[ARRAYIDX1_9:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 9
+; CHECK-NEXT:    store i8 [[I_9]], ptr [[ARRAYIDX1_9]], align 1
; CHECK-NEXT:    ret void
;
; Input IR (pre-unroll).  NOTE(review): the entry block below shows no
; terminator in this fragment -- it appears truncated by extraction; the code
; is kept byte-identical.
entry:
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %b, i32 %i.06
- %i = load i8, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 %i.06
- store i8 %i, i8* %arrayidx1, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i32 %i.06
+ %i = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %a, i32 %i.06
+ store i8 %i, ptr %arrayidx1, align 1
%inc = add nuw nsw i32 %i.06, 1
%exitcond.not = icmp eq i32 %inc, 10
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
; Partial-unroll test: the input loop stores b[i]+1 -> a[i] for a constant
; 1000-iteration trip count; the CHECK lines expect an unroll factor of 4
; (three "or"-based index increments, one final add of 4, and the exit compare
; against 1000).  The paired -/+ lines are the typed-pointer (i16*) -> opaque
; "ptr" migration of both the IR and its autogenerated FileCheck lines; test
; semantics are unchanged.
-define hidden void @compile_time_partial(i16* nocapture %a, i16* nocapture readonly %b) {
+define hidden void @compile_time_partial(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: @compile_time_partial(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_07:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC_3:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[I_07]]
-; CHECK-NEXT:    [[I:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[I_07]]
+; CHECK-NEXT:    [[I:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT:    [[ADD:%.*]] = add i16 [[I]], 1
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i32 [[I_07]]
-; CHECK-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDX2]], align 2
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i32 [[I_07]]
+; CHECK-NEXT:    store i16 [[ADD]], ptr [[ARRAYIDX2]], align 2
; CHECK-NEXT:    [[INC:%.*]] = or i32 [[I_07]], 1
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[INC]]
-; CHECK-NEXT:    [[I_1:%.*]] = load i16, i16* [[ARRAYIDX_1]], align 2
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT:    [[I_1:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 2
; CHECK-NEXT:    [[ADD_1:%.*]] = add i16 [[I_1]], 1
-; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[INC]]
-; CHECK-NEXT:    store i16 [[ADD_1]], i16* [[ARRAYIDX2_1]], align 2
+; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT:    store i16 [[ADD_1]], ptr [[ARRAYIDX2_1]], align 2
; CHECK-NEXT:    [[INC_1:%.*]] = or i32 [[I_07]], 2
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[INC_1]]
-; CHECK-NEXT:    [[I_2:%.*]] = load i16, i16* [[ARRAYIDX_2]], align 2
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INC_1]]
+; CHECK-NEXT:    [[I_2:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 2
; CHECK-NEXT:    [[ADD_2:%.*]] = add i16 [[I_2]], 1
-; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[INC_1]]
-; CHECK-NEXT:    store i16 [[ADD_2]], i16* [[ARRAYIDX2_2]], align 2
+; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INC_1]]
+; CHECK-NEXT:    store i16 [[ADD_2]], ptr [[ARRAYIDX2_2]], align 2
; CHECK-NEXT:    [[INC_2:%.*]] = or i32 [[I_07]], 3
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[INC_2]]
-; CHECK-NEXT:    [[I_3:%.*]] = load i16, i16* [[ARRAYIDX_3]], align 2
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INC_2]]
+; CHECK-NEXT:    [[I_3:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 2
; CHECK-NEXT:    [[ADD_3:%.*]] = add i16 [[I_3]], 1
-; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[INC_2]]
-; CHECK-NEXT:    store i16 [[ADD_3]], i16* [[ARRAYIDX2_3]], align 2
+; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INC_2]]
+; CHECK-NEXT:    store i16 [[ADD_3]], ptr [[ARRAYIDX2_3]], align 2
; CHECK-NEXT:    [[INC_3]] = add nuw nsw i32 [[I_07]], 4
; CHECK-NEXT:    [[EXITCOND_NOT_3:%.*]] = icmp eq i32 [[INC_3]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT_3]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
; Input IR loop body (pre-unroll; the 4 copies above are generated from this):
for.body: ; preds = %for.body, %entry
%i.07 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i16, i16* %b, i32 %i.07
- %i = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %b, i32 %i.07
+ %i = load i16, ptr %arrayidx, align 2
%add = add i16 %i, 1
- %arrayidx2 = getelementptr inbounds i16, i16* %a, i32 %i.07
- store i16 %add, i16* %arrayidx2, align 2
+ %arrayidx2 = getelementptr inbounds i16, ptr %a, i32 %i.07
+ store i16 %add, ptr %arrayidx2, align 2
%inc = add nuw nsw i32 %i.07, 1
%exitcond.not = icmp eq i32 %inc, 1000
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define hidden void @runtime(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define hidden void @runtime(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
; CHECK-LABEL: @runtime(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i32 [[XTRAITER]], 0
; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_EPIL:%.*]]
; CHECK: for.body.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[I_09_UNR]]
-; CHECK-NEXT: [[I_EPIL:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
-; CHECK-NEXT: [[ARRAYIDX1_EPIL:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_09_UNR]]
-; CHECK-NEXT: [[I1_EPIL:%.*]] = load i32, i32* [[ARRAYIDX1_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: [[I_EPIL:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX1_EPIL:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: [[I1_EPIL:%.*]] = load i32, ptr [[ARRAYIDX1_EPIL]], align 4
; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul nsw i32 [[I1_EPIL]], [[I_EPIL]]
-; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I_09_UNR]]
-; CHECK-NEXT: store i32 [[MUL_EPIL]], i32* [[ARRAYIDX2_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_09_UNR]]
+; CHECK-NEXT: store i32 [[MUL_EPIL]], ptr [[ARRAYIDX2_EPIL]], align 4
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_09:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INC_1]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[I_09]]
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 [[I_09]]
-; CHECK-NEXT: [[I1:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[I_09]]
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[I_09]]
+; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1]], [[I]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_09]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_09]]
+; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = or i32 [[I_09]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[INC]]
-; CHECK-NEXT: [[I_1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds i32, i32* [[C]], i32 [[INC]]
-; CHECK-NEXT: [[I1_1:%.*]] = load i32, i32* [[ARRAYIDX1_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT: [[I_1:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INC]]
+; CHECK-NEXT: [[I1_1:%.*]] = load i32, ptr [[ARRAYIDX1_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[I1_1]], [[I_1]]
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INC]]
-; CHECK-NEXT: store i32 [[MUL_1]], i32* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT: store i32 [[MUL_1]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[I_09]], 2
; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2
; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i32 [[NITER_NEXT_1]], [[UNROLL_ITER]]
for.body: ; preds = %for.body, %entry
%i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.09
- %i = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.09
- %i1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.09
+ %i = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %c, i32 %i.09
+ %i1 = load i32, ptr %arrayidx1, align 4
%mul = mul nsw i32 %i1, %i
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.09
- store i32 %mul, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 %i.09
+ store i32 %mul, ptr %arrayidx2, align 4
%inc = add nuw i32 %i.09, 1
%exitcond.not = icmp eq i32 %inc, %N
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define hidden void @dont_unroll_call(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define hidden void @dont_unroll_call(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
; CHECK-LABEL: @dont_unroll_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP12_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[I_013]]
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_013]]
-; CHECK-NEXT: [[I1:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_013]]
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_013]]
+; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1]], [[I]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I_013]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[ARRAYIDX2]], align 4
-; CHECK-NEXT: call i32 (i8*, ...) @printf(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 [[I_013]], i32 [[MUL]])
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_013]]
+; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(1) @.str, i32 [[I_013]], i32 [[MUL]])
; CHECK-NEXT: [[INC]] = add nuw i32 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
for.body: ; preds = %for.body, %entry
%i.013 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.013
- %i = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.013
- %i1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.013
+ %i = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %c, i32 %i.013
+ %i1 = load i32, ptr %arrayidx1, align 4
%mul = mul nsw i32 %i1, %i
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.013
- store i32 %mul, i32* %arrayidx2, align 4
- call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 %i.013, i32 %mul)
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 %i.013
+ store i32 %mul, ptr %arrayidx2, align 4
+ call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, i32 %i.013, i32 %mul)
%inc = add nuw i32 %i.013, 1
%exitcond.not = icmp eq i32 %inc, %N
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define hidden void @dont_unroll_optsize(i8* nocapture %a, i8* nocapture readonly %b) #0 {
+define hidden void @dont_unroll_optsize(ptr nocapture %a, ptr nocapture readonly %b) #0 {
; CHECK-LABEL: @dont_unroll_optsize(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[B:%.*]], i32 [[I_06]]
-; CHECK-NEXT: [[I:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i32 [[I_06]]
-; CHECK-NEXT: store i8 [[I]], i8* [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_06]]
+; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[I_06]]
+; CHECK-NEXT: store i8 [[I]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_06]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 10
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %b, i32 %i.06
- %i = load i8, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 %i.06
- store i8 %i, i8* %arrayidx1, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i32 %i.06
+ %i = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %a, i32 %i.06
+ store i8 %i, ptr %arrayidx1, align 1
%inc = add nuw nsw i32 %i.06, 1
%exitcond.not = icmp eq i32 %inc, 10
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define hidden void @dont_unroll_minsize(i8* nocapture %a, i8* nocapture readonly %b) #1 {
+define hidden void @dont_unroll_minsize(ptr nocapture %a, ptr nocapture readonly %b) #1 {
; CHECK-LABEL: @dont_unroll_minsize(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[B:%.*]], i32 [[I_06]]
-; CHECK-NEXT: [[I:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i32 [[I_06]]
-; CHECK-NEXT: store i8 [[I]], i8* [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_06]]
+; CHECK-NEXT: [[I:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[I_06]]
+; CHECK-NEXT: store i8 [[I]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_06]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 10
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %b, i32 %i.06
- %i = load i8, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 %i.06
- store i8 %i, i8* %arrayidx1, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i32 %i.06
+ %i = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %a, i32 %i.06
+ store i8 %i, ptr %arrayidx1, align 1
%inc = add nuw nsw i32 %i.06, 1
%exitcond.not = icmp eq i32 %inc, 10
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
attributes #1 = { minsize }
@.str = private unnamed_addr constant [12 x i8] c"a[%d] = %d\0A\00", align 1
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @foo(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
+define void @foo(ptr noalias nocapture readnone %ip, double %alpha, ptr noalias nocapture %a, ptr noalias nocapture readonly %b) #0 {
entry:
br label %vector.body
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds double, double* %b, i64 %index
- %1 = bitcast double* %0 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %1, align 8
+ %0 = getelementptr inbounds double, ptr %b, i64 %index
+ %wide.load = load <2 x double>, ptr %0, align 8
%.sum9 = or i64 %index, 2
- %2 = getelementptr double, double* %b, i64 %.sum9
- %3 = bitcast double* %2 to <2 x double>*
- %wide.load8 = load <2 x double>, <2 x double>* %3, align 8
- %4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
- %5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
- %6 = getelementptr inbounds double, double* %a, i64 %index
- %7 = bitcast double* %6 to <2 x double>*
- store <2 x double> %4, <2 x double>* %7, align 8
+ %1 = getelementptr double, ptr %b, i64 %.sum9
+ %wide.load8 = load <2 x double>, ptr %1, align 8
+ %2 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
+ %3 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
+ %4 = getelementptr inbounds double, ptr %a, i64 %index
+ store <2 x double> %2, ptr %4, align 8
%.sum10 = or i64 %index, 2
- %8 = getelementptr double, double* %a, i64 %.sum10
- %9 = bitcast double* %8 to <2 x double>*
- store <2 x double> %5, <2 x double>* %9, align 8
+ %5 = getelementptr double, ptr %a, i64 %.sum10
+ store <2 x double> %3, ptr %5, align 8
%index.next = add i64 %index, 4
- %10 = icmp eq i64 %index.next, 1600
- br i1 %10, label %for.end, label %vector.body
+ %6 = icmp eq i64 %index.next, 1600
+ br i1 %6, label %for.end, label %vector.body
; FIXME: We should probably unroll this loop by a factor of 2, but the cost
; model needs to be fixed to account for instructions likely to be folded
ret void
}
-define void @bar(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
+define void @bar(ptr noalias nocapture readnone %ip, double %alpha, ptr noalias nocapture %a, ptr noalias nocapture readonly %b) #0 {
entry:
br label %vector.body
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %v0 = getelementptr inbounds double, double* %b, i64 %index
- %v1 = bitcast double* %v0 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %v1, align 8
+ %v0 = getelementptr inbounds double, ptr %b, i64 %index
+ %wide.load = load <2 x double>, ptr %v0, align 8
%v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
- %v6 = getelementptr inbounds double, double* %a, i64 %index
- %v7 = bitcast double* %v6 to <2 x double>*
- store <2 x double> %v5, <2 x double>* %v7, align 8
+ %v6 = getelementptr inbounds double, ptr %a, i64 %index
+ store <2 x double> %v5, ptr %v6, align 8
%index.next = add i64 %index, 2
%v10 = icmp eq i64 %index.next, 1600
br i1 %v10, label %for.end, label %vector.body
ret void
}
-define zeroext i16 @test1(i16* nocapture readonly %arr, i32 %n) #0 {
+define zeroext i16 @test1(ptr nocapture readonly %arr, i32 %n) #0 {
entry:
%cmp25 = icmp eq i32 %n, 0
br i1 %cmp25, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %arr, i64 %indvars.iv
+ %0 = load i16, ptr %arrayidx, align 2
%mul = shl i16 %0, 1
%add = add i16 %mul, %reduction.026
%sext = mul i64 %indvars.iv, 12884901888
%idxprom3 = ashr exact i64 %sext, 32
- %arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
- %1 = load i16, i16* %arrayidx4, align 2
+ %arrayidx4 = getelementptr inbounds i16, ptr %arr, i64 %idxprom3
+ %1 = load i16, ptr %arrayidx4, align 2
%mul2 = shl i16 %1, 1
%add7 = add i16 %add, %mul2
%sext28 = mul i64 %indvars.iv, 21474836480
%idxprom10 = ashr exact i64 %sext28, 32
- %arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
- %2 = load i16, i16* %arrayidx11, align 2
+ %arrayidx11 = getelementptr inbounds i16, ptr %arr, i64 %idxprom10
+ %2 = load i16, ptr %arrayidx11, align 2
%mul3 = shl i16 %2, 1
%add14 = add i16 %add7, %mul3
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: Loop Size = 27
; CHECK-NOT: UNROLLING loop %loop.2.header
-define void @foo(i32 * %out) {
+define void @foo(ptr %out) {
entry:
%0 = alloca [1024 x i32]
%x0 = alloca [1024 x i32]
br label %loop.body
loop.body:
- %ptr = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 %counter
- store i32 %counter, i32* %ptr
+ %ptr = getelementptr [1024 x i32], ptr %0, i32 0, i32 %counter
+ store i32 %counter, ptr %ptr
%val = add i32 %counter, 5
- %xptr = getelementptr [1024 x i32], [1024 x i32]* %x0, i32 0, i32 %counter
- store i32 %val, i32* %xptr
+ %xptr = getelementptr [1024 x i32], ptr %x0, i32 0, i32 %counter
+ store i32 %val, ptr %xptr
%val1 = add i32 %counter, 6
- %xptr1 = getelementptr [1024 x i32], [1024 x i32]* %x01, i32 0, i32 %counter
- store i32 %val1, i32* %xptr1
+ %xptr1 = getelementptr [1024 x i32], ptr %x01, i32 0, i32 %counter
+ store i32 %val1, ptr %xptr1
%val2 = add i32 %counter, 7
- %xptr2 = getelementptr [1024 x i32], [1024 x i32]* %x02, i32 0, i32 %counter
- store i32 %val2, i32* %xptr2
+ %xptr2 = getelementptr [1024 x i32], ptr %x02, i32 0, i32 %counter
+ store i32 %val2, ptr %xptr2
%val3 = add i32 %counter, 8
- %xptr3 = getelementptr [1024 x i32], [1024 x i32]* %x03, i32 0, i32 %counter
- store i32 %val3, i32* %xptr3
+ %xptr3 = getelementptr [1024 x i32], ptr %x03, i32 0, i32 %counter
+ store i32 %val3, ptr %xptr3
%val4 = add i32 %counter, 9
- %xptr4 = getelementptr [1024 x i32], [1024 x i32]* %x04, i32 0, i32 %counter
- store i32 %val4, i32* %xptr4
+ %xptr4 = getelementptr [1024 x i32], ptr %x04, i32 0, i32 %counter
+ store i32 %val4, ptr %xptr4
%val5 = add i32 %counter, 10
- %xptr5 = getelementptr [1024 x i32], [1024 x i32]* %x05, i32 0, i32 %counter
- store i32 %val5, i32* %xptr5
+ %xptr5 = getelementptr [1024 x i32], ptr %x05, i32 0, i32 %counter
+ store i32 %val5, ptr %xptr5
br label %loop.inc
loop.inc:
br i1 %1, label %exit.0, label %loop.header
exit.0:
- %2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 5
- %3 = load i32, i32* %2
- store i32 %3, i32 * %out
+ %2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 5
+ %3 = load i32, ptr %2
+ store i32 %3, ptr %out
br label %loop.2.header
br label %loop.2.body
loop.2.body:
- %ptr.2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 %counter.2
- store i32 %counter.2, i32* %ptr.2
+ %ptr.2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 %counter.2
+ store i32 %counter.2, ptr %ptr.2
%val.2 = add i32 %counter.2, 5
- %xptr.2 = getelementptr [1024 x i32], [1024 x i32]* %x0, i32 0, i32 %counter.2
- store i32 %val.2, i32* %xptr.2
+ %xptr.2 = getelementptr [1024 x i32], ptr %x0, i32 0, i32 %counter.2
+ store i32 %val.2, ptr %xptr.2
%val1.2 = add i32 %counter.2, 6
- %xptr1.2 = getelementptr [1024 x i32], [1024 x i32]* %x01, i32 0, i32 %counter.2
- store i32 %val1, i32* %xptr1.2
+ %xptr1.2 = getelementptr [1024 x i32], ptr %x01, i32 0, i32 %counter.2
+ store i32 %val1, ptr %xptr1.2
%val2.2 = add i32 %counter.2, 7
- %xptr2.2 = getelementptr [1024 x i32], [1024 x i32]* %x02, i32 0, i32 %counter.2
- store i32 %val2, i32* %xptr2.2
+ %xptr2.2 = getelementptr [1024 x i32], ptr %x02, i32 0, i32 %counter.2
+ store i32 %val2, ptr %xptr2.2
%val3.2 = add i32 %counter.2, 8
- %xptr3.2 = getelementptr [1024 x i32], [1024 x i32]* %x03, i32 0, i32 %counter.2
- store i32 %val3.2, i32* %xptr3.2
+ %xptr3.2 = getelementptr [1024 x i32], ptr %x03, i32 0, i32 %counter.2
+ store i32 %val3.2, ptr %xptr3.2
%val4.2 = add i32 %counter.2, 9
- %xptr4.2 = getelementptr [1024 x i32], [1024 x i32]* %x04, i32 0, i32 %counter.2
- store i32 %val4.2, i32* %xptr4.2
+ %xptr4.2 = getelementptr [1024 x i32], ptr %x04, i32 0, i32 %counter.2
+ store i32 %val4.2, ptr %xptr4.2
%val5.2 = add i32 %counter.2, 10
- %xptr5.2 = getelementptr [1024 x i32], [1024 x i32]* %x05, i32 0, i32 %counter.2
- store i32 %val5.2, i32* %xptr5.2
- %xptr6.2 = getelementptr [1024 x i32], [1024 x i32]* %x06, i32 0, i32 %counter.2
- store i32 %val5.2, i32* %xptr6.2
+ %xptr5.2 = getelementptr [1024 x i32], ptr %x05, i32 0, i32 %counter.2
+ store i32 %val5.2, ptr %xptr5.2
+ %xptr6.2 = getelementptr [1024 x i32], ptr %x06, i32 0, i32 %counter.2
+ store i32 %val5.2, ptr %xptr6.2
br label %loop.2.inc
loop.2.inc:
br i1 %4, label %exit.2, label %loop.2.header
exit.2:
- %x2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 6
- %x3 = load i32, i32* %x2
- %out2 = getelementptr i32, i32 * %out, i32 1
- store i32 %3, i32 * %out2
+ %x2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 6
+ %x3 = load i32, ptr %x2
+ %out2 = getelementptr i32, ptr %out, i32 1
+ store i32 %3, ptr %out2
ret void
}
; This should not unroll since the address of the loop header is taken.
; CHECK-LABEL: @test1(
-; CHECK: store i8* blockaddress(@test1, %l1), i8** %P
+; CHECK: store ptr blockaddress(@test1, %l1), ptr %P
; CHECK: l1:
; CHECK-NEXT: phi i32
; rdar://8287027
-define i32 @test1(i8** %P) nounwind ssp {
+define i32 @test1(ptr %P) nounwind ssp {
entry:
- store i8* blockaddress(@test1, %l1), i8** %P
+ store ptr blockaddress(@test1, %l1), ptr %P
br label %l1
l1: ; preds = %l1, %entry
; This should not unroll since the call is 'noduplicate'.
; CHECK-LABEL: @test2(
-define i32 @test2(i8** %P) nounwind ssp {
+define i32 @test2(ptr %P) nounwind ssp {
entry:
br label %l1
; Function Attrs: nounwind
declare void @llvm.assume(i1) #1
-define i32 @foo(i32* %a) {
+define i32 @foo(ptr %a) {
; ANALYZE-FULL-LABEL: @foo(
; ANALYZE-FULL-NEXT: entry:
; ANALYZE-FULL-NEXT: br label [[FOR_BODY:%.*]]
; ANALYZE-FULL: for.body:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE:%.*]], label [[FOR_NEXT:%.*]]
; ANALYZE-FULL: do_store:
-; ANALYZE-FULL-NEXT: store i32 0, i32* [[A:%.*]], align 4
+; ANALYZE-FULL-NEXT: store i32 0, ptr [[A:%.*]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT]]
; ANALYZE-FULL: for.next:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_1:%.*]], label [[FOR_NEXT_1:%.*]]
; ANALYZE-FULL: do_store.1:
-; ANALYZE-FULL-NEXT: [[GEP_1:%.*]] = getelementptr i32, i32* [[A]], i32 1
-; ANALYZE-FULL-NEXT: store i32 1, i32* [[GEP_1]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[A]], i32 1
+; ANALYZE-FULL-NEXT: store i32 1, ptr [[GEP_1]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_1]]
; ANALYZE-FULL: for.next.1:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_2:%.*]], label [[FOR_NEXT_2:%.*]]
; ANALYZE-FULL: do_store.2:
-; ANALYZE-FULL-NEXT: [[GEP_2:%.*]] = getelementptr i32, i32* [[A]], i32 2
-; ANALYZE-FULL-NEXT: store i32 2, i32* [[GEP_2]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr [[A]], i32 2
+; ANALYZE-FULL-NEXT: store i32 2, ptr [[GEP_2]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_2]]
; ANALYZE-FULL: for.next.2:
; ANALYZE-FULL-NEXT: br i1 true, label [[DO_STORE_3:%.*]], label [[FOR_NEXT_3:%.*]]
; ANALYZE-FULL: do_store.3:
-; ANALYZE-FULL-NEXT: [[GEP_3:%.*]] = getelementptr i32, i32* [[A]], i32 3
-; ANALYZE-FULL-NEXT: store i32 3, i32* [[GEP_3]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_3:%.*]] = getelementptr i32, ptr [[A]], i32 3
+; ANALYZE-FULL-NEXT: store i32 3, ptr [[GEP_3]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_3]]
; ANALYZE-FULL: for.next.3:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_4:%.*]], label [[FOR_NEXT_4:%.*]]
; ANALYZE-FULL: do_store.4:
-; ANALYZE-FULL-NEXT: [[GEP_4:%.*]] = getelementptr i32, i32* [[A]], i32 4
-; ANALYZE-FULL-NEXT: store i32 4, i32* [[GEP_4]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_4:%.*]] = getelementptr i32, ptr [[A]], i32 4
+; ANALYZE-FULL-NEXT: store i32 4, ptr [[GEP_4]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_4]]
; ANALYZE-FULL: for.next.4:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_5:%.*]], label [[FOR_NEXT_5:%.*]]
; ANALYZE-FULL: do_store.5:
-; ANALYZE-FULL-NEXT: [[GEP_5:%.*]] = getelementptr i32, i32* [[A]], i32 5
-; ANALYZE-FULL-NEXT: store i32 5, i32* [[GEP_5]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_5:%.*]] = getelementptr i32, ptr [[A]], i32 5
+; ANALYZE-FULL-NEXT: store i32 5, ptr [[GEP_5]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_5]]
; ANALYZE-FULL: for.next.5:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_6:%.*]], label [[FOR_NEXT_6:%.*]]
; ANALYZE-FULL: do_store.6:
-; ANALYZE-FULL-NEXT: [[GEP_6:%.*]] = getelementptr i32, i32* [[A]], i32 6
-; ANALYZE-FULL-NEXT: store i32 6, i32* [[GEP_6]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_6:%.*]] = getelementptr i32, ptr [[A]], i32 6
+; ANALYZE-FULL-NEXT: store i32 6, ptr [[GEP_6]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_6]]
; ANALYZE-FULL: for.next.6:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_7:%.*]], label [[FOR_NEXT_7:%.*]]
; ANALYZE-FULL: do_store.7:
-; ANALYZE-FULL-NEXT: [[GEP_7:%.*]] = getelementptr i32, i32* [[A]], i32 7
-; ANALYZE-FULL-NEXT: store i32 7, i32* [[GEP_7]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_7:%.*]] = getelementptr i32, ptr [[A]], i32 7
+; ANALYZE-FULL-NEXT: store i32 7, ptr [[GEP_7]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_7]]
; ANALYZE-FULL: for.next.7:
; ANALYZE-FULL-NEXT: br i1 false, label [[DO_STORE_8:%.*]], label [[FOR_NEXT_8:%.*]]
; ANALYZE-FULL: do_store.8:
-; ANALYZE-FULL-NEXT: [[GEP_8:%.*]] = getelementptr i32, i32* [[A]], i32 8
-; ANALYZE-FULL-NEXT: store i32 8, i32* [[GEP_8]], align 4
+; ANALYZE-FULL-NEXT: [[GEP_8:%.*]] = getelementptr i32, ptr [[A]], i32 8
+; ANALYZE-FULL-NEXT: store i32 8, ptr [[GEP_8]], align 4
; ANALYZE-FULL-NEXT: br label [[FOR_NEXT_8]]
; ANALYZE-FULL: for.next.8:
; ANALYZE-FULL-NEXT: ret i32 9
; DONT-ANALYZE-FULL-NEXT: [[CMP2:%.*]] = icmp ule i32 [[INDVAR]], 3
; DONT-ANALYZE-FULL-NEXT: br i1 [[CMP2]], label [[DO_STORE:%.*]], label [[FOR_NEXT]]
; DONT-ANALYZE-FULL: do_store:
-; DONT-ANALYZE-FULL-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[INDVAR]]
-; DONT-ANALYZE-FULL-NEXT: store i32 [[INDVAR]], i32* [[GEP]], align 4
+; DONT-ANALYZE-FULL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[INDVAR]]
+; DONT-ANALYZE-FULL-NEXT: store i32 [[INDVAR]], ptr [[GEP]], align 4
; DONT-ANALYZE-FULL-NEXT: br label [[FOR_NEXT]]
; DONT-ANALYZE-FULL: for.next:
; DONT-ANALYZE-FULL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INDVAR_NEXT]], 9
br i1 %cmp2, label %do_store, label %for.next
do_store:
- %gep = getelementptr i32, i32* %a, i32 %indvar
- store i32 %indvar, i32* %gep
+ %gep = getelementptr i32, ptr %a, i32 %indvar
+ store i32 %indvar, ptr %gep
br label %for.next
for.next:
; CHECK: call void @llvm.dbg.value(metadata i32 16, metadata !12, metadata !DIExpression()), !dbg !15
; CHECK: call void @llvm.dbg.value(metadata i32 64, metadata !12, metadata !DIExpression()), !dbg !15
- %call = tail call i32 (i32, ...) bitcast (i32 (...)* @bar to i32 (i32, ...)*)(i32 %shr) #3, !dbg !20
+ %call = tail call i32 (i32, ...) @bar(i32 %shr) #3, !dbg !20
%shl = shl i32 %i.04, 2, !dbg !21
tail call void @llvm.dbg.value(metadata i32 %shl, metadata !12, metadata !DIExpression()), !dbg !15
%cmp = icmp slt i32 %shl, 33, !dbg !22
; CHECK-LABEL: @forced(
; CHECK: load
; CHECK: load
-define void @forced(i32* nocapture %a) {
+define void @forced(ptr nocapture %a) {
entry:
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK-LABEL: @disable_nonforced(
; CHECK: load
; CHECK-NOT: load
-define void @disable_nonforced(i32* nocapture %a) {
+define void @disable_nonforced(ptr nocapture %a) {
entry:
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK: store
; CHECK: store
; CHECK-NOT: store
-define void @disable_nonforced_count(i32* nocapture %a) {
+define void @disable_nonforced_count(ptr nocapture %a) {
entry:
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK: store
; CHECK: store
; CHECK-NOT: store
-define void @disable_nonforced_enable(i32* nocapture %a) {
+define void @disable_nonforced_enable(ptr nocapture %a) {
entry:
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK: store
; CHECK: store
; CHECK-NOT: store
-define void @disable_nonforced_full(i32* nocapture %a) {
+define void @disable_nonforced_full(ptr nocapture %a) {
entry:
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK: for.body:
; CHECK-NOT: for.end:
-define i32 @test1(i32* nocapture %a) nounwind uwtable readonly {
+define i32 @test1(ptr nocapture %a) nounwind uwtable readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
; This loop will be completely unrolled, even with these extra instructions,
; but only because they're ephemeral (and, thus, free).
; CHECK: for.body.epil
; Function Attrs: norecurse nounwind uwtable
-define void @const_phi_val(i32 %i0, i32* nocapture %a) {
+define void @const_phi_val(i32 %i0, ptr nocapture %a) {
entry:
%cmp6 = icmp slt i32 %i0, 1000
br i1 %cmp6, label %for.body.preheader, label %for.end
for.body: ; preds = %for.body, %for.body.preheader
%indvars.iv = phi i64 [ %tmp, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%s.08 = phi i32 [ 0, %for.body.preheader ], [ %xor, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- store i32 %s.08, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ store i32 %s.08, ptr %arrayidx, align 4
%xor = xor i32 %s.08, 1
%indvars.iv.next = add nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1000
; CHECK: for.body.prol
; Function Attrs: norecurse nounwind uwtable
-define void @var_phi_val(i32 %i0, i32* nocapture %a) {
+define void @var_phi_val(i32 %i0, ptr nocapture %a) {
entry:
%cmp6 = icmp slt i32 %i0, 1000
br i1 %cmp6, label %for.body.preheader, label %for.end
for.body: ; preds = %for.body, %for.body.preheader
%indvars.iv = phi i64 [ %tmp, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
%indvars.iv.next = add nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1000
br i1 %exitcond, label %for.end.loopexit, label %for.body
;
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
entry:
%cmp1 = icmp eq i32 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%phi = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%idx = zext i32 undef to i64
- %add.ptr = getelementptr inbounds i64, i64* null, i64 %idx
+ %add.ptr = getelementptr inbounds i64, ptr null, i64 %idx
%inc = add nuw nsw i64 %phi, 1
%cmp = icmp ult i64 %inc, 999
br i1 %cmp, label %for.body, label %for.exit
for.body:
%phi = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %x = getelementptr i32, <4 x i32*> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %x = getelementptr i32, <4 x ptr> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%inc = add nuw nsw i64 %phi, 1
%cmp = icmp ult i64 %inc, 999
br i1 %cmp, label %for.body, label %for.exit
for.body: ; preds = %for.inc, %entry
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.inc ]
- %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv.0
- %x1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv.0
+ %x1 = load i32, ptr %arrayidx1, align 4
%cmp = icmp eq i32 %x1, undef
br i1 %cmp, label %if.then, label %for.inc
for.body:
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.inc ]
- %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv.0
- %x1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv.0
+ %x1 = load i32, ptr %arrayidx1, align 4
switch i32 %x1, label %l1 [
]
for.body:
%phi = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%vec_phi = phi <4 x i32> [ <i32 0, i32 0, i32 0, i32 0>, %entry ], [ %r, %for.body ]
- %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %phi
- %bc = bitcast i32* %arrayidx to <4 x i32>*
- %x = load <4 x i32>, < 4 x i32>* %bc, align 4
+ %arrayidx = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %phi
+ %x = load <4 x i32>, < 4 x i32>* %arrayidx, align 4
%r = add <4 x i32> %x, %vec_phi
%inc = add nuw nsw i64 %phi, 1
%cmp = icmp ult i64 %inc, 999
br i1 true, label %for.inc, label %if.then
if.then:
- %arraydecay = getelementptr inbounds [1 x i32], [1 x i32]* null, i64 0, i64 0
- %x = ptrtoint i32* %arraydecay to i64
+ %x = ptrtoint ptr null to i64
br label %for.inc
for.inc:
for.body:
%iv = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %offset = getelementptr inbounds float, float* null, i32 3
- %bc = bitcast float* %offset to i64*
+ %offset = getelementptr inbounds float, ptr null, i32 3
%inc = add nuw nsw i32 %iv, 1
br i1 false, label %for.body, label %exit
for.body:
%iv = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
- %m = phi i32* [ @i, %entry ], [ %m, %for.inc ]
+ %m = phi ptr [ @i, %entry ], [ %m, %for.inc ]
br i1 undef, label %if.else, label %if.then
if.then:
unreachable
if.else:
- %cmp = icmp ult i32* %m, null
+ %cmp = icmp ult ptr %m, null
br i1 %cmp, label %cond.false, label %for.inc
cond.false:
br i1 undef, label %for.body2, label %for.inc
for.body2:
- %idx = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv
- %x = load i32, i32* %idx, align 1
+ %idx = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv
+ %x = load i32, ptr %idx, align 1
br label %for.inc
for.inc:
br label %for.body
for.body:
- %d = phi i32* [ null, %for.header ]
- %cmp = icmp eq i32* %d, null
+ %d = phi ptr [ null, %for.header ]
+ %cmp = icmp eq ptr %d, null
br i1 undef, label %for.end, label %for.header
for.end:
for.body:
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.body ]
- %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv.0
- %bc = bitcast i32* %arrayidx1 to i64*
- %x1 = load i64, i64* %bc, align 4
+ %arrayidx1 = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv.0
+ %x1 = load i64, ptr %arrayidx1, align 4
%x2 = add i64 10, %x1
%iv.1 = add nuw nsw i64 %iv.0, 1
%exitcond = icmp eq i64 %iv.1, 10
; Though @unknown_global is initialized with constant values, we can't consider
; it as a constant, so we shouldn't unroll the loop.
; CHECK-LABEL: @foo
-; CHECK: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @unknown_global, i64 0, i64 %iv
-define i32 @foo(i32* noalias nocapture readonly %src) {
+; CHECK: %array_const_idx = getelementptr inbounds [9 x i32], ptr @unknown_global, i64 0, i64 %iv
+define i32 @foo(ptr noalias nocapture readonly %src) {
entry:
br label %loop
loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @unknown_global, i64 0, i64 %iv
- %const_array_element = load i32, i32* %array_const_idx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
+ %array_const_idx = getelementptr inbounds [9 x i32], ptr @unknown_global, i64 0, i64 %iv
+ %const_array_element = load i32, ptr %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
; Similarly, we can't consider 'weak' symbols as a known constant value, so we
; shouldn't unroll the loop.
; CHECK-LABEL: @foo2
-; CHECK: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @weak_constant, i64 0, i64 %iv
-define i32 @foo2(i32* noalias nocapture readonly %src) {
+; CHECK: %array_const_idx = getelementptr inbounds [9 x i32], ptr @weak_constant, i64 0, i64 %iv
+define i32 @foo2(ptr noalias nocapture readonly %src) {
entry:
br label %loop
loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @weak_constant, i64 0, i64 %iv
- %const_array_element = load i32, i32* %array_const_idx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
+ %array_const_idx = getelementptr inbounds [9 x i32], ptr @weak_constant, i64 0, i64 %iv
+ %const_array_element = load i32, ptr %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
; clean up almost entire loop. Make sure that we do not unroll such loop.
; CHECK-LABEL: @foo3
; CHECK: br i1 %exitcond, label %loop.end, label %loop.header
-define i32 @foo3(i32* noalias nocapture readonly %src) {
+define i32 @foo3(ptr noalias nocapture readonly %src) {
entry:
br label %loop.header
loop.header:
%iv = phi i64 [ 0, %entry ], [ %inc, %loop.latch ]
%r1 = phi i32 [ 0, %entry ], [ %r3, %loop.latch ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
%cmp = icmp eq i32 0, %src_element
br i1 %cmp, label %loop.if, label %loop.latch
; CHECK-LABEL: @branch_folded
; CHECK-NOT: br i1 %
; CHECK: ret i32
-define i32 @branch_folded(i32* noalias nocapture readonly %b) {
+define i32 @branch_folded(ptr noalias nocapture readonly %b) {
entry:
br label %for.body
for.body: ; preds = %for.inc, %entry
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.inc ]
%r.0 = phi i32 [ 0, %entry ], [ %r.1, %for.inc ]
- %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv.0
- %x1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv.0
+ %x1 = load i32, ptr %arrayidx1, align 4
%cmp = icmp eq i32 %x1, 0
%iv.1 = add nuw nsw i64 %iv.0, 1
br i1 %cmp, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %iv.0
- %x2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 %iv.0
+ %x2 = load i32, ptr %arrayidx2, align 4
%add = add nsw i32 %x2, %r.0
br label %for.inc
br label %while.body
while.body:
- %iv.0 = phi i32* [ getelementptr inbounds ([10 x i32], [10 x i32]* @known_constant, i64 0, i64 0), %entry ], [ %iv.1, %while.body ]
- %iv.1 = getelementptr inbounds i32, i32* %iv.0, i64 1
- %exitcond = icmp eq i32* %iv.1, getelementptr inbounds ([10 x i32], [10 x i32]* @known_constant, i64 0, i64 9)
+ %iv.0 = phi ptr [ @known_constant, %entry ], [ %iv.1, %while.body ]
+ %iv.1 = getelementptr inbounds i32, ptr %iv.0, i64 1
+ %exitcond = icmp eq ptr %iv.1, getelementptr inbounds ([10 x i32], ptr @known_constant, i64 0, i64 9)
br i1 %exitcond, label %loop.exit, label %while.body
loop.exit:
; Check that we don't crash when we analyze ptrtoint cast.
; CHECK-LABEL: @ptrtoint_cast_crash
; CHECK: ret void
-define void @ptrtoint_cast_crash(i8 * %a) {
+define void @ptrtoint_cast_crash(ptr %a) {
entry:
- %limit = getelementptr i8, i8* %a, i64 512
+ %limit = getelementptr i8, ptr %a, i64 512
br label %loop.body
loop.body:
- %iv.0 = phi i8* [ %a, %entry ], [ %iv.1, %loop.body ]
- %cast = ptrtoint i8* %iv.0 to i64
- %iv.1 = getelementptr inbounds i8, i8* %iv.0, i64 1
- %exitcond = icmp ne i8* %iv.1, %limit
+ %iv.0 = phi ptr [ %a, %entry ], [ %iv.1, %loop.body ]
+ %cast = ptrtoint ptr %iv.0 to i64
+ %iv.1 = getelementptr inbounds i8, ptr %iv.0, i64 1
+ %exitcond = icmp ne ptr %iv.1, %limit
br i1 %exitcond, label %loop.body, label %loop.exit
loop.exit:
; instruction is simplified, the other operand might become dead.
; In this test we have::
; for i in 1..10:
-; r += A[i] * B[i]
+; r += Aptr B[i]
; A[i] is 0 almost at every iteration, so there is no need in loading B[i] at
; all.
; CHECK-LABEL: @unroll_dce
; CHECK-NOT: br i1 %exitcond, label %for.end, label %for.body
-define i32 @unroll_dce(i32* noalias nocapture readonly %b) {
+define i32 @unroll_dce(ptr noalias nocapture readonly %b) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.body ]
%r.0 = phi i32 [ 0, %entry ], [ %r.1, %for.body ]
- %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv.0
- %x1 = load i32, i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %iv.0
- %x2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx1 = getelementptr inbounds [10 x i32], ptr @known_constant, i64 0, i64 %iv.0
+ %x1 = load i32, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 %iv.0
+ %x2 = load i32, ptr %arrayidx2, align 4
%mul = mul i32 %x1, %x2
%r.1 = add i32 %mul, %r.0
%iv.1 = add nuw nsw i64 %iv.0, 1
; CHECK-LABEL: @not_simplified_geps
; CHECK: br i1 %
; CHECK: ret void
-define void @not_simplified_geps(i32* noalias %b, i32* noalias %c) {
+define void @not_simplified_geps(ptr noalias %b, ptr noalias %c) {
entry:
br label %for.body
for.body:
%iv.0 = phi i64 [ 0, %entry ], [ %iv.1, %for.body ]
- %arrayidx1 = getelementptr inbounds i32, i32* %b, i64 %iv.0
- %x1 = load i32, i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %c, i64 %iv.0
- store i32 %x1, i32* %arrayidx2, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %iv.0
+ %x1 = load i32, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv.0
+ store i32 %x1, ptr %arrayidx2, align 4
%iv.1 = add nuw nsw i64 %iv.0, 1
%exitcond = icmp eq i64 %iv.1, 10
br i1 %exitcond, label %for.end, label %for.body
; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop-unroll' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST3
; If the absolute threshold is too low, we should not unroll:
-; TEST1: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST1: %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
; Otherwise, we should:
-; TEST2-NOT: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST2-NOT: %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
; If we do not boost threshold, the unroll will not happen:
-; TEST3: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST3: %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
; And check that we don't crash when we're not allowed to do any analysis.
; RUN: opt < %s -passes=loop-unroll -unroll-max-iteration-count-to-analyze=0 -disable-output
@known_constant = internal unnamed_addr constant [9 x i32] [i32 0, i32 -1, i32 0, i32 -1, i32 5, i32 -1, i32 0, i32 -1, i32 0], align 16
-define i32 @foo(i32* noalias nocapture readonly %src) {
+define i32 @foo(ptr noalias nocapture readonly %src) {
entry:
br label %loop
loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
- %const_array_element = load i32, i32* %array_const_idx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
+ %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
+ %const_array_element = load i32, ptr %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
; RUN: opt -S -passes='require<opt-remark-emit>,loop(loop-unroll-full)' < %s | FileCheck %s
; Unroll twice, with first loop exit kept
-define void @s32_max1(i32 %n, i32* %p) {
+define void @s32_max1(i32 %n, ptr %p) {
;
; CHECK-LABEL: @s32_max1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[N:%.*]], 1
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[N]]
-; CHECK-NEXT: store i32 [[N]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[N]]
+; CHECK-NEXT: store i32 [[N]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[N]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[N]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY_1:%.*]], label [[DO_END:%.*]]
; CHECK: do.body.1:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC]]
-; CHECK-NEXT: store i32 [[INC]], i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC]]
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: br label [[DO_END]]
; CHECK: do.end:
; CHECK-NEXT: ret void
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 1 times
}
; Unroll thrice, with first loop exit kept
-define void @s32_max2(i32 %n, i32* %p) {
+define void @s32_max2(i32 %n, ptr %p) {
;
; CHECK-LABEL: @s32_max2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[N:%.*]], 2
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[N]]
-; CHECK-NEXT: store i32 [[N]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[N]]
+; CHECK-NEXT: store i32 [[N]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[N]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[N]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY_1:%.*]], label [[DO_END:%.*]]
; CHECK: do.body.1:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC]]
-; CHECK-NEXT: store i32 [[INC]], i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC]]
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[INC_1:%.*]] = add i32 [[INC]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC_1]]
-; CHECK-NEXT: store i32 [[INC_1]], i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC_1]]
+; CHECK-NEXT: store i32 [[INC_1]], ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: br label [[DO_END]]
; CHECK: do.end:
; CHECK-NEXT: ret void
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 2 times
}
; Should not be unrolled
-define void @s32_maxx(i32 %n, i32 %x, i32* %p) {
+define void @s32_maxx(i32 %n, i32 %x, ptr %p) {
;
; CHECK-LABEL: @s32_maxx(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ [[N]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[DO_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[I_0]]
-; CHECK-NEXT: store i32 [[I_0]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[I_0]]
+; CHECK-NEXT: store i32 [[I_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add i32 [[I_0]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY]], label [[DO_END:%.*]]
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or x times
}
; Should not be unrolled
-define void @s32_max2_unpredictable_exit(i32 %n, i32 %x, i32* %p) {
+define void @s32_max2_unpredictable_exit(i32 %n, i32 %x, ptr %p) {
;
; CHECK-LABEL: @s32_max2_unpredictable_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I_0]], [[X:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_END:%.*]], label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[I_0]]
-; CHECK-NEXT: store i32 [[I_0]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[I_0]]
+; CHECK-NEXT: store i32 [[I_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add i32 [[I_0]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[I_0]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP1]], label [[DO_BODY]], label [[DO_END]]
br i1 %cmp, label %do.end, label %if.end ; unpredictable
if.end:
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp1 = icmp slt i32 %i.0, %add
br i1 %cmp1, label %do.body, label %do.end ; taken either 0 or 2 times
}
; Unroll twice, with first loop exit kept
-define void @u32_max1(i32 %n, i32* %p) {
+define void @u32_max1(i32 %n, ptr %p) {
;
; CHECK-LABEL: @u32_max1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[N:%.*]], 1
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[N]]
-; CHECK-NEXT: store i32 [[N]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[N]]
+; CHECK-NEXT: store i32 [[N]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[N]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[N]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY_1:%.*]], label [[DO_END:%.*]]
; CHECK: do.body.1:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC]]
-; CHECK-NEXT: store i32 [[INC]], i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC]]
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: br label [[DO_END]]
; CHECK: do.end:
; CHECK-NEXT: ret void
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 1 times
}
; Unroll thrice, with first loop exit kept
-define void @u32_max2(i32 %n, i32* %p) {
+define void @u32_max2(i32 %n, ptr %p) {
;
; CHECK-LABEL: @u32_max2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[N:%.*]], 2
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[N]]
-; CHECK-NEXT: store i32 [[N]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[N]]
+; CHECK-NEXT: store i32 [[N]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[N]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[N]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY_1:%.*]], label [[DO_END:%.*]]
; CHECK: do.body.1:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC]]
-; CHECK-NEXT: store i32 [[INC]], i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC]]
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[INC_1:%.*]] = add i32 [[INC]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, i32* [[P]], i32 [[INC_1]]
-; CHECK-NEXT: store i32 [[INC_1]], i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, ptr [[P]], i32 [[INC_1]]
+; CHECK-NEXT: store i32 [[INC_1]], ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: br label [[DO_END]]
; CHECK: do.end:
; CHECK-NEXT: ret void
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 2 times
}
; Should not be unrolled
-define void @u32_maxx(i32 %n, i32 %x, i32* %p) {
+define void @u32_maxx(i32 %n, i32 %x, ptr %p) {
;
; CHECK-LABEL: @u32_maxx(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ [[N]], [[ENTRY:%.*]] ], [ [[INC:%.*]], [[DO_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[I_0]]
-; CHECK-NEXT: store i32 [[I_0]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[I_0]]
+; CHECK-NEXT: store i32 [[I_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add i32 [[I_0]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[I_0]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_BODY]], label [[DO_END:%.*]]
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or x times
}
; Should not be unrolled
-define void @u32_max2_unpredictable_exit(i32 %n, i32 %x, i32* %p) {
+define void @u32_max2_unpredictable_exit(i32 %n, i32 %x, ptr %p) {
;
; CHECK-LABEL: @u32_max2_unpredictable_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I_0]], [[X:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[DO_END:%.*]], label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i32 [[I_0]]
-; CHECK-NEXT: store i32 [[I_0]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 [[I_0]]
+; CHECK-NEXT: store i32 [[I_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add i32 [[I_0]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 [[I_0]], [[ADD]]
; CHECK-NEXT: br i1 [[CMP1]], label [[DO_BODY]], label [[DO_END]]
br i1 %cmp, label %do.end, label %if.end ; unpredictable
if.end:
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp1 = icmp ult i32 %i.0, %add
br i1 %cmp1, label %do.body, label %do.end ; taken either 0 or 2 times
; CHECK-NEXT: start:
; CHECK-NEXT: [[A1:%.*]] = alloca [2 x i64], align 8
; CHECK-NEXT: [[A2:%.*]] = alloca [2 x i64], align 8
-; CHECK-NEXT: [[A1_0:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 0
-; CHECK-NEXT: store i64 -5015437470765251660, i64* [[A1_0]], align 8
-; CHECK-NEXT: [[A1_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 1
-; CHECK-NEXT: store i64 -8661621401413125213, i64* [[A1_1]], align 8
-; CHECK-NEXT: [[A2_0:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 0
-; CHECK-NEXT: store i64 -5015437470765251660, i64* [[A2_0]], align 8
-; CHECK-NEXT: [[A2_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 1
-; CHECK-NEXT: store i64 -8661621401413125213, i64* [[A2_1]], align 8
+; CHECK-NEXT: store i64 -5015437470765251660, ptr [[A1]], align 8
+; CHECK-NEXT: [[A1_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A1]], i64 0, i64 1
+; CHECK-NEXT: store i64 -8661621401413125213, ptr [[A1_1]], align 8
+; CHECK-NEXT: store i64 -5015437470765251660, ptr [[A2]], align 8
+; CHECK-NEXT: [[A2_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A2]], i64 0, i64 1
+; CHECK-NEXT: store i64 -8661621401413125213, ptr [[A2_1]], align 8
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 0
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 0
-; CHECK-NEXT: [[LOAD1:%.*]] = load i64, i64* [[GEP1]], align 8
-; CHECK-NEXT: [[LOAD2:%.*]] = load i64, i64* [[GEP2]], align 8
+; CHECK-NEXT: [[LOAD1:%.*]] = load i64, ptr [[A1]], align 8
+; CHECK-NEXT: [[LOAD2:%.*]] = load i64, ptr [[A2]], align 8
; CHECK-NEXT: [[EXITCOND2:%.*]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: br i1 [[EXITCOND2]], label [[LATCH:%.*]], label [[EXIT:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 1
-; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 1
-; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, i64* [[GEP1_1]], align 8
-; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, i64* [[GEP2_1]], align 8
+; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A1]], i64 0, i64 1
+; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A2]], i64 0, i64 1
+; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, ptr [[GEP1_1]], align 8
+; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, ptr [[GEP2_1]], align 8
; CHECK-NEXT: [[EXITCOND2_1:%.*]] = icmp eq i64 [[LOAD1_1]], [[LOAD2_1]]
; CHECK-NEXT: br i1 [[EXITCOND2_1]], label [[LATCH_1:%.*]], label [[EXIT]]
; CHECK: latch.1:
start:
%a1 = alloca [2 x i64], align 8
%a2 = alloca [2 x i64], align 8
- %a1.0 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 0
- store i64 -5015437470765251660, i64* %a1.0, align 8
- %a1.1 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 1
- store i64 -8661621401413125213, i64* %a1.1, align 8
- %a2.0 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 0
- store i64 -5015437470765251660, i64* %a2.0, align 8
- %a2.1 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 1
- store i64 -8661621401413125213, i64* %a2.1, align 8
+ store i64 -5015437470765251660, ptr %a1, align 8
+ %a1.1 = getelementptr inbounds [2 x i64], ptr %a1, i64 0, i64 1
+ store i64 -8661621401413125213, ptr %a1.1, align 8
+ store i64 -5015437470765251660, ptr %a2, align 8
+ %a2.1 = getelementptr inbounds [2 x i64], ptr %a2, i64 0, i64 1
+ store i64 -8661621401413125213, ptr %a2.1, align 8
br label %loop
loop:
%iv = phi i64 [ 0, %start ], [ %iv.next, %latch ]
- %gep1 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 %iv
- %gep2 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 %iv
- %load1 = load i64, i64* %gep1, align 8
- %load2 = load i64, i64* %gep2, align 8
+ %gep1 = getelementptr inbounds [2 x i64], ptr %a1, i64 0, i64 %iv
+ %gep2 = getelementptr inbounds [2 x i64], ptr %a2, i64 0, i64 %iv
+ %load1 = load i64, ptr %gep1, align 8
+ %load2 = load i64, ptr %gep2, align 8
%exitcond2 = icmp eq i64 %load1, %load2
br i1 %exitcond2, label %latch, label %exit
; CHECK-NEXT: start:
; CHECK-NEXT: [[A1:%.*]] = alloca [2 x i64], align 8
; CHECK-NEXT: [[A2:%.*]] = alloca [2 x i64], align 8
-; CHECK-NEXT: [[A1_0:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 0
-; CHECK-NEXT: store i64 -5015437470765251660, i64* [[A1_0]], align 8
-; CHECK-NEXT: [[A1_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 1
-; CHECK-NEXT: store i64 -8661621401413125213, i64* [[A1_1]], align 8
-; CHECK-NEXT: [[A2_0:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 0
-; CHECK-NEXT: store i64 -5015437470765251660, i64* [[A2_0]], align 8
-; CHECK-NEXT: [[A2_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 1
-; CHECK-NEXT: store i64 -8661621401413125213, i64* [[A2_1]], align 8
+; CHECK-NEXT: store i64 -5015437470765251660, ptr [[A1]], align 8
+; CHECK-NEXT: [[A1_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A1]], i64 0, i64 1
+; CHECK-NEXT: store i64 -8661621401413125213, ptr [[A1_1]], align 8
+; CHECK-NEXT: store i64 -5015437470765251660, ptr [[A2]], align 8
+; CHECK-NEXT: [[A2_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A2]], i64 0, i64 1
+; CHECK-NEXT: store i64 -8661621401413125213, ptr [[A2_1]], align 8
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 0
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 0
-; CHECK-NEXT: [[LOAD1:%.*]] = load i64, i64* [[GEP1]], align 8
-; CHECK-NEXT: [[LOAD2:%.*]] = load i64, i64* [[GEP2]], align 8
+; CHECK-NEXT: [[LOAD1:%.*]] = load i64, ptr [[A1]], align 8
+; CHECK-NEXT: [[LOAD2:%.*]] = load i64, ptr [[A2]], align 8
; CHECK-NEXT: [[EXITCOND2:%.*]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: br i1 [[EXITCOND2]], label [[LOOP_1:%.*]], label [[EXIT:%.*]]
; CHECK: loop.1:
; CHECK-NEXT: br label [[LATCH_1:%.*]]
; CHECK: latch.1:
-; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A1]], i64 0, i64 1
-; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[A2]], i64 0, i64 1
-; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, i64* [[GEP1_1]], align 8
-; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, i64* [[GEP2_1]], align 8
+; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A1]], i64 0, i64 1
+; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds [2 x i64], ptr [[A2]], i64 0, i64 1
+; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, ptr [[GEP1_1]], align 8
+; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, ptr [[GEP2_1]], align 8
; CHECK-NEXT: [[EXITCOND2_1:%.*]] = icmp eq i64 [[LOAD1_1]], [[LOAD2_1]]
; CHECK-NEXT: br i1 [[EXITCOND2_1]], label [[LOOP_2:%.*]], label [[EXIT]]
; CHECK: loop.2:
start:
%a1 = alloca [2 x i64], align 8
%a2 = alloca [2 x i64], align 8
- %a1.0 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 0
- store i64 -5015437470765251660, i64* %a1.0, align 8
- %a1.1 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 1
- store i64 -8661621401413125213, i64* %a1.1, align 8
- %a2.0 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 0
- store i64 -5015437470765251660, i64* %a2.0, align 8
- %a2.1 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 1
- store i64 -8661621401413125213, i64* %a2.1, align 8
+ store i64 -5015437470765251660, ptr %a1, align 8
+ %a1.1 = getelementptr inbounds [2 x i64], ptr %a1, i64 0, i64 1
+ store i64 -8661621401413125213, ptr %a1.1, align 8
+ store i64 -5015437470765251660, ptr %a2, align 8
+ %a2.1 = getelementptr inbounds [2 x i64], ptr %a2, i64 0, i64 1
+ store i64 -8661621401413125213, ptr %a2.1, align 8
br label %loop
loop:
latch:
%iv.next = add nuw nsw i64 %iv, 1
- %gep1 = getelementptr inbounds [2 x i64], [2 x i64]* %a1, i64 0, i64 %iv
- %gep2 = getelementptr inbounds [2 x i64], [2 x i64]* %a2, i64 0, i64 %iv
- %load1 = load i64, i64* %gep1, align 8
- %load2 = load i64, i64* %gep2, align 8
+ %gep1 = getelementptr inbounds [2 x i64], ptr %a1, i64 0, i64 %iv
+ %gep2 = getelementptr inbounds [2 x i64], ptr %a2, i64 0, i64 %iv
+ %load1 = load i64, ptr %gep1, align 8
+ %load2 = load i64, ptr %gep2, align 8
%exitcond2 = icmp eq i64 %load1, %load2
br i1 %exitcond2, label %loop, label %exit
;; Check that we don't emit expensive instructions to compute trip
;; counts when unrolling loops.
-define i32 @test(i64 %v12, i8* %array, i64* %loc) {
+define i32 @test(i64 %v12, ptr %array, ptr %loc) {
; CHECK-LABEL: @test(
; CHECK-NOT: udiv
entry:
- %step = load i64, i64* %loc, !range !0
+ %step = load i64, ptr %loc, !range !0
br label %loop
loop: ; preds = %entry, %loop
%k.015 = phi i64 [ %v15, %loop ], [ %v12, %entry ]
- %v14 = getelementptr inbounds i8, i8* %array, i64 %k.015
- store i8 0, i8* %v14
+ %v14 = getelementptr inbounds i8, ptr %array, i64 %k.015
+ store i8 0, ptr %v14
%v15 = add nuw nsw i64 %k.015, %step
%v16 = icmp slt i64 %v15, 8193
br i1 %v16, label %loop, label %loopexit
;; exists in the code and we don't need to expand it once more.
;; Thus, it shouldn't prevent us from unrolling the loop.
-define i32 @test2(i64* %loc, i64 %conv7) {
+define i32 @test2(ptr %loc, i64 %conv7) {
; CHECK-LABEL: @test2(
; CHECK: udiv
; CHECK: udiv
; CHECK-NOT: udiv
; CHECK-LABEL: for.body
entry:
- %rem0 = load i64, i64* %loc, align 8
+ %rem0 = load i64, ptr %loc, align 8
%ExpensiveComputation = udiv i64 %rem0, 42 ; <<< Extra computations are added to the trip-count expression
br label %bb1
bb1:
br i1 %cmp, label %exit, label %for.body
exit:
%rem3 = phi i64 [ %rem2, %for.body ]
- store i64 %rem3, i64* %loc, align 8
+ store i64 %rem3, ptr %loc, align 8
ret i32 0
}
@B = common global i32 0, align 4
-define void @foo(i32* noalias %A, i32 %B, i32 %C) {
+define void @foo(ptr noalias %A, i32 %B, i32 %C) {
entry:
br label %for.body
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
; The real loop.
%mul = mul nsw i32 %B, %C
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.01
- store i32 %mul, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.01
+ store i32 %mul, ptr %arrayidx, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp ne i32 %inc, 4
; A bunch of annotations
- %annot.0 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.1 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.2 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.3 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.4 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.5 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.6 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.7 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.8 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.9 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.10 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.11 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.12 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.13 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.14 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.15 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.16 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.17 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.18 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.19 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.20 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.21 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.22 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.23 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.24 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.25 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.26 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.27 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.28 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.29 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.30 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.31 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.32 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.33 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.34 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.35 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.36 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.37 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.38 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.39 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.40 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.41 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.42 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.43 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.44 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.45 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.46 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.47 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.48 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.49 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.50 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.51 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.52 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.53 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.54 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.55 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.56 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.57 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.58 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.59 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.60 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.61 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.62 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.63 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.64 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.65 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.66 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.67 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.68 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.69 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.70 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.71 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.72 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.73 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.74 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.75 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.76 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.77 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.78 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.79 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.80 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.81 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.82 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.83 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.84 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.85 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.86 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.87 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.88 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.89 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.90 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.91 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.92 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.93 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.94 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.95 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.96 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.97 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.98 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
- %annot.99 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
+ %annot.0 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.1 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.2 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.3 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.4 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.5 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.6 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.7 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.8 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.9 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.10 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.11 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.12 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.13 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.14 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.15 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.16 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.17 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.18 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.19 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.20 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.21 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.22 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.23 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.24 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.25 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.26 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.27 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.28 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.29 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.30 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.31 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.32 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.33 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.34 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.35 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.36 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.37 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.38 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.39 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.40 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.41 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.42 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.43 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.44 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.45 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.46 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.47 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.48 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.49 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.50 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.51 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.52 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.53 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.54 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.55 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.56 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.57 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.58 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.59 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.60 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.61 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.62 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.63 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.64 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.65 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.66 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.67 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.68 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.69 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.70 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.71 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.72 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.73 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.74 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.75 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.76 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.77 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.78 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.79 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.80 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.81 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.82 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.83 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.84 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.85 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.86 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.87 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.88 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.89 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.90 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.91 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.92 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.93 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.94 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.95 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.96 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.97 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.98 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
+ %annot.99 = tail call i32 @llvm.annotation.i32(i32 %i.01, ptr null, ptr null, i32 0)
br i1 %exitcond, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
-declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32)
+declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32)
; Make sure that this test doesn't crash because of dangling pointer in SCEV.
declare void @llvm.experimental.guard(i1, ...)
-define void @test(i32* %p, i8** %p2, i64* %dest) {
+define void @test(ptr %p, ptr %p2, ptr %dest) {
; CHECK-LABEL: @test(
br label %innermost.loop
store.block: ; preds = %innermost.loop
- store i64 %tmp20, i64* %dest, align 8
+ store i64 %tmp20, ptr %dest, align 8
br i1 %tmp1, label %exit, label %inner.latch
inner.latch: ; preds = %store.block
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=loop-unroll -unroll-count=4 < %s | FileCheck %s
-define void @test_inside(i32* %addr1, i32* %addr2) {
+define void @test_inside(ptr %addr1, ptr %addr2) {
; CHECK-LABEL: @test_inside(
; CHECK-NEXT: start:
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[ADDR1:%.*]], align 4, !alias.scope !0
-; CHECK-NEXT: store i32 [[X]], i32* [[ADDR2:%.*]], align 4, !noalias !0
-; CHECK-NEXT: [[ADDR1I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR1]], i32 1
-; CHECK-NEXT: [[ADDR2I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR2]], i32 1
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[ADDR1:%.*]], align 4, !alias.scope !0
+; CHECK-NEXT: store i32 [[X]], ptr [[ADDR2:%.*]], align 4, !noalias !0
+; CHECK-NEXT: [[ADDR1I_1:%.*]] = getelementptr inbounds i32, ptr [[ADDR1]], i32 1
+; CHECK-NEXT: [[ADDR2I_1:%.*]] = getelementptr inbounds i32, ptr [[ADDR2]], i32 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !3)
-; CHECK-NEXT: [[X_1:%.*]] = load i32, i32* [[ADDR1I_1]], align 4, !alias.scope !3
-; CHECK-NEXT: store i32 [[X_1]], i32* [[ADDR2I_1]], align 4, !noalias !3
+; CHECK-NEXT: [[X_1:%.*]] = load i32, ptr [[ADDR1I_1]], align 4, !alias.scope !3
+; CHECK-NEXT: store i32 [[X_1]], ptr [[ADDR2I_1]], align 4, !noalias !3
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !5)
-; CHECK-NEXT: [[X_2:%.*]] = load i32, i32* [[ADDR1]], align 4, !alias.scope !5
-; CHECK-NEXT: store i32 [[X_2]], i32* [[ADDR2]], align 4, !noalias !5
-; CHECK-NEXT: [[ADDR1I_3:%.*]] = getelementptr inbounds i32, i32* [[ADDR1]], i32 1
-; CHECK-NEXT: [[ADDR2I_3:%.*]] = getelementptr inbounds i32, i32* [[ADDR2]], i32 1
+; CHECK-NEXT: [[X_2:%.*]] = load i32, ptr [[ADDR1]], align 4, !alias.scope !5
+; CHECK-NEXT: store i32 [[X_2]], ptr [[ADDR2]], align 4, !noalias !5
+; CHECK-NEXT: [[ADDR1I_3:%.*]] = getelementptr inbounds i32, ptr [[ADDR1]], i32 1
+; CHECK-NEXT: [[ADDR2I_3:%.*]] = getelementptr inbounds i32, ptr [[ADDR2]], i32 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !7)
-; CHECK-NEXT: [[X_3:%.*]] = load i32, i32* [[ADDR1I_3]], align 4, !alias.scope !7
-; CHECK-NEXT: store i32 [[X_3]], i32* [[ADDR2I_3]], align 4, !noalias !7
+; CHECK-NEXT: [[X_3:%.*]] = load i32, ptr [[ADDR1I_3]], align 4, !alias.scope !7
+; CHECK-NEXT: store i32 [[X_3]], ptr [[ADDR2I_3]], align 4, !noalias !7
; CHECK-NEXT: ret void
;
start:
body:
%i = phi i32 [ 0, %start ], [ %i2, %body ]
%j = and i32 %i, 1
- %addr1i = getelementptr inbounds i32, i32* %addr1, i32 %j
- %addr2i = getelementptr inbounds i32, i32* %addr2, i32 %j
+ %addr1i = getelementptr inbounds i32, ptr %addr1, i32 %j
+ %addr2i = getelementptr inbounds i32, ptr %addr2, i32 %j
call void @llvm.experimental.noalias.scope.decl(metadata !2)
- %x = load i32, i32* %addr1i, !alias.scope !2
- store i32 %x, i32* %addr2i, !noalias !2
+ %x = load i32, ptr %addr1i, !alias.scope !2
+ store i32 %x, ptr %addr2i, !noalias !2
%i2 = add i32 %i, 1
%cmp = icmp slt i32 %i2, 4
ret void
}
-define void @test_outside(i32* %addr1, i32* %addr2) {
+define void @test_outside(ptr %addr1, ptr %addr2) {
; CHECK-LABEL: @test_outside(
; CHECK-NEXT: start:
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[ADDR1:%.*]], align 4, !alias.scope !0
-; CHECK-NEXT: store i32 [[X]], i32* [[ADDR2:%.*]], align 4, !noalias !0
-; CHECK-NEXT: [[ADDR1I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR1]], i32 1
-; CHECK-NEXT: [[ADDR2I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR2]], i32 1
-; CHECK-NEXT: [[X_1:%.*]] = load i32, i32* [[ADDR1I_1]], align 4, !alias.scope !0
-; CHECK-NEXT: store i32 [[X_1]], i32* [[ADDR2I_1]], align 4, !noalias !0
-; CHECK-NEXT: [[X_2:%.*]] = load i32, i32* [[ADDR1]], align 4, !alias.scope !0
-; CHECK-NEXT: store i32 [[X_2]], i32* [[ADDR2]], align 4, !noalias !0
-; CHECK-NEXT: [[ADDR1I_3:%.*]] = getelementptr inbounds i32, i32* [[ADDR1]], i32 1
-; CHECK-NEXT: [[ADDR2I_3:%.*]] = getelementptr inbounds i32, i32* [[ADDR2]], i32 1
-; CHECK-NEXT: [[X_3:%.*]] = load i32, i32* [[ADDR1I_3]], align 4, !alias.scope !0
-; CHECK-NEXT: store i32 [[X_3]], i32* [[ADDR2I_3]], align 4, !noalias !0
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[ADDR1:%.*]], align 4, !alias.scope !0
+; CHECK-NEXT: store i32 [[X]], ptr [[ADDR2:%.*]], align 4, !noalias !0
+; CHECK-NEXT: [[ADDR1I_1:%.*]] = getelementptr inbounds i32, ptr [[ADDR1]], i32 1
+; CHECK-NEXT: [[ADDR2I_1:%.*]] = getelementptr inbounds i32, ptr [[ADDR2]], i32 1
+; CHECK-NEXT: [[X_1:%.*]] = load i32, ptr [[ADDR1I_1]], align 4, !alias.scope !0
+; CHECK-NEXT: store i32 [[X_1]], ptr [[ADDR2I_1]], align 4, !noalias !0
+; CHECK-NEXT: [[X_2:%.*]] = load i32, ptr [[ADDR1]], align 4, !alias.scope !0
+; CHECK-NEXT: store i32 [[X_2]], ptr [[ADDR2]], align 4, !noalias !0
+; CHECK-NEXT: [[ADDR1I_3:%.*]] = getelementptr inbounds i32, ptr [[ADDR1]], i32 1
+; CHECK-NEXT: [[ADDR2I_3:%.*]] = getelementptr inbounds i32, ptr [[ADDR2]], i32 1
+; CHECK-NEXT: [[X_3:%.*]] = load i32, ptr [[ADDR1I_3]], align 4, !alias.scope !0
+; CHECK-NEXT: store i32 [[X_3]], ptr [[ADDR2I_3]], align 4, !noalias !0
; CHECK-NEXT: ret void
;
start:
body:
%i = phi i32 [ 0, %start ], [ %i2, %body ]
%j = and i32 %i, 1
- %addr1i = getelementptr inbounds i32, i32* %addr1, i32 %j
- %addr2i = getelementptr inbounds i32, i32* %addr2, i32 %j
+ %addr1i = getelementptr inbounds i32, ptr %addr1, i32 %j
+ %addr2i = getelementptr inbounds i32, ptr %addr2, i32 %j
- %x = load i32, i32* %addr1i, !alias.scope !2
- store i32 %x, i32* %addr2i, !noalias !2
+ %x = load i32, ptr %addr1i, !alias.scope !2
+ store i32 %x, ptr %addr2i, !noalias !2
%i2 = add i32 %i, 1
%cmp = icmp slt i32 %i2, 4
; Check that loop unroll pass correctly handle loops with
; single exiting block not the loop header or latch.
-define void @test1(i32* noalias %A) {
+define void @test1(ptr noalias %A) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: call void @bar(i32 [[TMP0]])
; CHECK-NEXT: br label [[FOR_HEADER:%.*]]
; CHECK: for.header:
; CHECK: for.body:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE:%.*]]
; CHECK: for.body.for.body_crit_edge:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 1
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE]])
; CHECK-NEXT: br label [[FOR_BODY_1:%.*]]
; CHECK: for.body.1:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE_1:%.*]]
; CHECK: for.body.for.body_crit_edge.1:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
-; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 2
+; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_1]])
; CHECK-NEXT: br label [[FOR_BODY_2:%.*]]
; CHECK: for.body.2:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE_2:%.*]]
; CHECK: for.body.for.body_crit_edge.2:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
-; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 3
+; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_2]])
; CHECK-NEXT: br label [[FOR_BODY_3:%.*]]
; CHECK: for.body.3:
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
call void @bar(i32 %0)
br label %for.header
for.header:
%1 = phi i32 [ %0, %entry ], [ %.pre, %for.body.for.body_crit_edge ]
%i = phi i64 [ 0, %entry ], [ %inc, %for.body.for.body_crit_edge ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i
call void @bar(i32 %1)
br label %for.body
br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
for.body.for.body_crit_edge:
- %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %A, i64 %inc
- %.pre = load i32, i32* %arrayidx.phi.trans.insert, align 4
+ %arrayidx.phi.trans.insert = getelementptr inbounds i32, ptr %A, i64 %inc
+ %.pre = load i32, ptr %arrayidx.phi.trans.insert, align 4
br label %for.header
for.end:
; (1) exiting block not dominating the loop latch; and
; (2) exiting terminator instructions cannot be simplified to unconditional.
-define void @test2(i32* noalias %A) {
+define void @test2(ptr noalias %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 true, label [[FOR_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.preheader:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: call void @bar(i32 [[TMP0]])
; CHECK-NEXT: br label [[FOR_HEADER:%.*]]
; CHECK: for.header:
; CHECK-NEXT: [[CMP:%.*]] = call i1 @foo(i64 [[I]])
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.body.for.body_crit_edge:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INC]]
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INC]]
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE]])
; CHECK-NEXT: [[INC_1:%.*]] = add nuw nsw i64 [[INC]], 1
; CHECK-NEXT: br i1 true, label [[FOR_BODY_1:%.*]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_1:%.*]]
; CHECK-NEXT: [[CMP_1:%.*]] = call i1 @foo(i64 [[INC]])
; CHECK-NEXT: br i1 [[CMP_1]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_1]], label [[FOR_END_LOOPEXIT]]
; CHECK: for.body.for.body_crit_edge.1:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INC_1]]
-; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INC_1]]
+; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_1]])
; CHECK-NEXT: [[INC_2:%.*]] = add nuw nsw i64 [[INC_1]], 1
; CHECK-NEXT: br i1 true, label [[FOR_BODY_2:%.*]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_2:%.*]]
; CHECK-NEXT: [[CMP_2:%.*]] = call i1 @foo(i64 [[INC_1]])
; CHECK-NEXT: br i1 [[CMP_2]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_2]], label [[FOR_END_LOOPEXIT]]
; CHECK: for.body.for.body_crit_edge.2:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INC_2]]
-; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INC_2]]
+; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_2]])
; CHECK-NEXT: [[INC_3]] = add nsw i64 [[INC_2]], 1
; CHECK-NEXT: br i1 true, label [[FOR_BODY_3:%.*]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_3]]
; CHECK-NEXT: [[CMP_3:%.*]] = call i1 @foo(i64 [[INC_2]])
; CHECK-NEXT: br i1 [[CMP_3]], label [[FOR_BODY_FOR_BODY_CRIT_EDGE_3]], label [[FOR_END_LOOPEXIT]]
; CHECK: for.body.for.body_crit_edge.3:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INC_3]]
-; CHECK-NEXT: [[DOTPRE_3]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_3]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INC_3]]
+; CHECK-NEXT: [[DOTPRE_3]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_3]], align 4
; CHECK-NEXT: br label [[FOR_HEADER]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
br i1 true, label %for.preheader, label %for.end
for.preheader:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
call void @bar(i32 %0)
br label %for.header
for.header:
%1 = phi i32 [ %0, %for.preheader ], [ %.pre, %for.body.for.body_crit_edge ]
%i = phi i64 [ 0, %for.preheader ], [ %inc, %for.body.for.body_crit_edge ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i
call void @bar(i32 %1)
%inc = add nsw i64 %i, 1
br i1 true, label %for.body, label %for.body.for.body_crit_edge
br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
for.body.for.body_crit_edge:
- %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %A, i64 %inc
- %.pre = load i32, i32* %arrayidx.phi.trans.insert, align 4
+ %arrayidx.phi.trans.insert = getelementptr inbounds i32, ptr %A, i64 %inc
+ %.pre = load i32, ptr %arrayidx.phi.trans.insert, align 4
br label %for.header
for.end:
; (1) multiple exiting blocks; and
; (2) loop latch is not an exiting block.
-define void @test3(i32* noalias %A, i1 %cond) {
+define void @test3(ptr noalias %A, i1 %cond) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: call void @bar(i32 [[TMP0]])
; CHECK-NEXT: br label [[FOR_HEADER:%.*]]
; CHECK: for.header:
; CHECK: for.body:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE:%.*]]
; CHECK: for.body.for.body_crit_edge:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 1
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE]])
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY_1:%.*]], label [[FOR_END]]
; CHECK: for.body.1:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE_1:%.*]]
; CHECK: for.body.for.body_crit_edge.1:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
-; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 2
+; CHECK-NEXT: [[DOTPRE_1:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_1]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_1]])
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY_2:%.*]], label [[FOR_END]]
; CHECK: for.body.2:
; CHECK-NEXT: br label [[FOR_BODY_FOR_BODY_CRIT_EDGE_2:%.*]]
; CHECK: for.body.for.body_crit_edge.2:
-; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
-; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, i32* [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_PHI_TRANS_INSERT_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 3
+; CHECK-NEXT: [[DOTPRE_2:%.*]] = load i32, ptr [[ARRAYIDX_PHI_TRANS_INSERT_2]], align 4
; CHECK-NEXT: call void @bar(i32 [[DOTPRE_2]])
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY_3:%.*]], label [[FOR_END]]
; CHECK: for.body.3:
; CHECK-NEXT: ret void
;
entry:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
call void @bar(i32 %0)
br label %for.header
for.header:
%1 = phi i32 [ %0, %entry ], [ %.pre, %for.body.for.body_crit_edge ]
%i = phi i64 [ 0, %entry ], [ %inc, %for.body.for.body_crit_edge ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i
call void @bar(i32 %1)
br i1 %cond, label %for.body, label %for.end
br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
for.body.for.body_crit_edge:
- %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %A, i64 %inc
- %.pre = load i32, i32* %arrayidx.phi.trans.insert, align 4
+ %arrayidx.phi.trans.insert = getelementptr inbounds i32, ptr %A, i64 %inc
+ %.pre = load i32, ptr %arrayidx.phi.trans.insert, align 4
br label %for.header
for.end:
; Os-NOT: loop1.preheader
; Oz-NOT: loop1.preheader
-define void @unroll(i32 %iter, i32* %addr1, i32* %addr2) nounwind {
+define void @unroll(i32 %iter, ptr %addr1, ptr %addr2) nounwind {
entry:
br label %loop1
loop1:
%iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
- %offset1 = getelementptr i32, i32* %addr1, i32 %iv1
- store i32 %iv1, i32* %offset1, align 4
+ %offset1 = getelementptr i32, ptr %addr1, i32 %iv1
+ store i32 %iv1, ptr %offset1, align 4
br label %loop2.header
loop2.header:
loop2:
%iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
- %offset2 = getelementptr i32, i32* %addr2, i32 %iv2
- store i32 %iv2, i32* %offset2, align 4
+ %offset2 = getelementptr i32, ptr %addr2, i32 %iv2
+ store i32 %iv2, ptr %offset2, align 4
%inc2 = add i32 %iv2, 1
%exitcnd2 = icmp uge i32 %inc2, %iter
br i1 %exitcnd2, label %exit2, label %loop2
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REF_TMP:%.*]] = alloca [3 x i32], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast [3 x i32]* [[REF_TMP]] to i8*
-; CHECK-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 0
-; CHECK-NEXT: store i32 [[A:%.*]], i32* [[ARRAYINIT_BEGIN]], align 4
-; CHECK-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 1
-; CHECK-NEXT: store i32 [[B:%.*]], i32* [[ARRAYINIT_ELEMENT]], align 4
-; CHECK-NEXT: [[ARRAYINIT_ELEMENT1:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 2
-; CHECK-NEXT: store i32 [[C:%.*]], i32* [[ARRAYINIT_ELEMENT1]], align 4
+; CHECK-NEXT: store i32 [[A:%.*]], ptr [[REF_TMP]], align 4
+; CHECK-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [3 x i32], ptr [[REF_TMP]], i64 0, i64 1
+; CHECK-NEXT: store i32 [[B:%.*]], ptr [[ARRAYINIT_ELEMENT]], align 4
+; CHECK-NEXT: [[ARRAYINIT_ELEMENT1:%.*]] = getelementptr inbounds [3 x i32], ptr [[REF_TMP]], i64 0, i64 2
+; CHECK-NEXT: store i32 [[C:%.*]], ptr [[ARRAYINIT_ELEMENT1]], align 4
; CHECK-NEXT: [[CMP_I_I_I3:%.*]] = icmp slt i32 [[A]], [[B]]
-; CHECK-NEXT: [[SPEC_SELECT_I_I4:%.*]] = select i1 [[CMP_I_I_I3]], i32* [[ARRAYINIT_ELEMENT]], i32* [[ARRAYINIT_BEGIN]]
-; CHECK-NEXT: [[INCDEC_PTR_I_I5:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[REF_TMP]], i64 0, i64 2
+; CHECK-NEXT: [[SPEC_SELECT_I_I4:%.*]] = select i1 [[CMP_I_I_I3]], ptr [[ARRAYINIT_ELEMENT]], ptr [[REF_TMP]]
+; CHECK-NEXT: [[INCDEC_PTR_I_I5:%.*]] = getelementptr inbounds [3 x i32], ptr [[REF_TMP]], i64 0, i64 2
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, i32* [[SPEC_SELECT_I_I4]], align 4
-; CHECK-NEXT: [[DOTPRE2:%.*]] = load i32, i32* [[INCDEC_PTR_I_I5]], align 4
+; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[SPEC_SELECT_I_I4]], align 4
+; CHECK-NEXT: [[DOTPRE2:%.*]] = load i32, ptr [[INCDEC_PTR_I_I5]], align 4
; CHECK-NEXT: [[CMP_I_I_I:%.*]] = icmp slt i32 [[DOTPRE]], [[DOTPRE2]]
-; CHECK-NEXT: [[SPEC_SELECT_I_I:%.*]] = select i1 [[CMP_I_I_I]], i32* [[INCDEC_PTR_I_I5]], i32* [[SPEC_SELECT_I_I4]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[SPEC_SELECT_I_I]], align 4
+; CHECK-NEXT: [[SPEC_SELECT_I_I:%.*]] = select i1 [[CMP_I_I_I]], ptr [[INCDEC_PTR_I_I5]], ptr [[SPEC_SELECT_I_I4]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SPEC_SELECT_I_I]], align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
%ref.tmp = alloca [3 x i32], align 4
- %0 = bitcast [3 x i32]* %ref.tmp to i8*
- %arrayinit.begin = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 0
- store i32 %a, i32* %arrayinit.begin, align 4
- %arrayinit.element = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 1
- store i32 %b, i32* %arrayinit.element, align 4
- %arrayinit.element1 = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 2
- store i32 %c, i32* %arrayinit.element1, align 4
- %add.ptr.i.i = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 3
+ store i32 %a, ptr %ref.tmp, align 4
+ %arrayinit.element = getelementptr inbounds [3 x i32], ptr %ref.tmp, i64 0, i64 1
+ store i32 %b, ptr %arrayinit.element, align 4
+ %arrayinit.element1 = getelementptr inbounds [3 x i32], ptr %ref.tmp, i64 0, i64 2
+ store i32 %c, ptr %arrayinit.element1, align 4
+ %add.ptr.i.i = getelementptr inbounds [3 x i32], ptr %ref.tmp, i64 0, i64 3
%cmp.i.i.i3 = icmp slt i32 %a, %b
- %spec.select.i.i4 = select i1 %cmp.i.i.i3, i32* %arrayinit.element, i32* %arrayinit.begin
- %incdec.ptr.i.i5 = getelementptr inbounds [3 x i32], [3 x i32]* %ref.tmp, i64 0, i64 2
+ %spec.select.i.i4 = select i1 %cmp.i.i.i3, ptr %arrayinit.element, ptr %ref.tmp
+ %incdec.ptr.i.i5 = getelementptr inbounds [3 x i32], ptr %ref.tmp, i64 0, i64 2
br label %loop
loop: ; preds = %entry, %loop
- %incdec.ptr.i.i7 = phi i32* [ %incdec.ptr.i.i5, %entry ], [ %incdec.ptr.i.i, %loop ]
- %spec.select.i.i6 = phi i32* [ %spec.select.i.i4, %entry ], [ %spec.select.i.i, %loop ]
- %.pre = load i32, i32* %spec.select.i.i6, align 4
- %.pre2 = load i32, i32* %incdec.ptr.i.i7, align 4
+ %incdec.ptr.i.i7 = phi ptr [ %incdec.ptr.i.i5, %entry ], [ %incdec.ptr.i.i, %loop ]
+ %spec.select.i.i6 = phi ptr [ %spec.select.i.i4, %entry ], [ %spec.select.i.i, %loop ]
+ %.pre = load i32, ptr %spec.select.i.i6, align 4
+ %.pre2 = load i32, ptr %incdec.ptr.i.i7, align 4
%cmp.i.i.i = icmp slt i32 %.pre, %.pre2
- %spec.select.i.i = select i1 %cmp.i.i.i, i32* %incdec.ptr.i.i7, i32* %spec.select.i.i6
- %incdec.ptr.i.i = getelementptr inbounds i32, i32* %incdec.ptr.i.i7, i64 1
- %cmp1.i.i = icmp eq i32* %incdec.ptr.i.i, %add.ptr.i.i
+ %spec.select.i.i = select i1 %cmp.i.i.i, ptr %incdec.ptr.i.i7, ptr %spec.select.i.i6
+ %incdec.ptr.i.i = getelementptr inbounds i32, ptr %incdec.ptr.i.i7, i64 1
+ %cmp1.i.i = icmp eq ptr %incdec.ptr.i.i, %add.ptr.i.i
br i1 %cmp1.i.i, label %exit, label %loop
exit: ; preds = %loop
- %1 = load i32, i32* %spec.select.i.i, align 4
- ret i32 %1
+ %0 = load i32, ptr %spec.select.i.i, align 4
+ ret i32 %0
}
; CHECK-NO-UNROLL: store
; CHECK-NO-UNROLL-NOT: store
-define void @foo(i32* nocapture %a, i32* nocapture readonly %b) nounwind uwtable {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %b) nounwind uwtable {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %ld = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %indvars.iv
+ %ld = load i32, ptr %arrayidx, align 4
%idxprom1 = sext i32 %ld to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i64 %idxprom1
%st = trunc i64 %indvars.iv to i32
- store i32 %st, i32* %arrayidx2, align 4
+ store i32 %st, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 20
br i1 %exitcond, label %for.end, label %for.body
; We just check that some unrolling happened here - the assert we've
; added to ValueHandleBase::operator* would fire if the bug was still
; present.
-; CHECK: atomicrmw volatile add i32*
-; CHECK: atomicrmw volatile add i32*
-; CHECK: atomicrmw volatile add i32*
+; CHECK: atomicrmw volatile add ptr
+; CHECK: atomicrmw volatile add ptr
+; CHECK: atomicrmw volatile add ptr
@global = external global i32, align 4
%tmp = phi i32 [ 0, %bb1 ], [ %tmp34, %bb33 ]
%tmp3 = phi i32 [ 0, %bb1 ], [ %tmp34, %bb33 ]
%tmp26 = and i32 %tmp, 1073741823
- %tmp27 = getelementptr inbounds i32, i32* @global, i32 %tmp26
- %tmp28 = atomicrmw volatile add i32* %tmp27, i32 1 monotonic
+ %tmp27 = getelementptr inbounds i32, ptr @global, i32 %tmp26
+ %tmp28 = atomicrmw volatile add ptr %tmp27, i32 1 monotonic
%tmp29 = icmp ugt i32 %tmp28, 23
%tmp30 = shl i32 %tmp, 6
%tmp31 = add i32 %tmp30, undef
%tmp32 = add i32 %tmp31, %tmp28
- store i32 undef, i32* undef, align 4
+ store i32 undef, ptr undef, align 4
br label %bb33
bb33:
; This test is primarily interested in making sure that latches are not
; folded incorrectly, not that a transform occurs.
-define i1 @test(i64* %a1, i64* %a2) {
+define i1 @test(ptr %a1, ptr %a2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: start:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: latch:
; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, i64* [[A1:%.*]], i64 [[IV]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, i64* [[A2:%.*]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD1:%.*]] = load i64, i64* [[GEP1]], align 8
-; CHECK-NEXT: [[LOAD2:%.*]] = load i64, i64* [[GEP2]], align 8
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[A1:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A2:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[LOAD1:%.*]] = load i64, ptr [[GEP1]], align 8
+; CHECK-NEXT: [[LOAD2:%.*]] = load i64, ptr [[GEP2]], align 8
; CHECK-NEXT: [[EXITCOND2:%.*]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
; CHECK-NEXT: br i1 [[EXITCOND2]], label [[LOOP_1:%.*]], label [[EXIT:%.*]]
; CHECK: loop.1:
; CHECK-NEXT: br label [[LATCH_1:%.*]]
; CHECK: latch.1:
; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV_NEXT]], 1
-; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds i64, i64* [[A1]], i64 [[IV_NEXT]]
-; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds i64, i64* [[A2]], i64 [[IV_NEXT]]
-; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, i64* [[GEP1_1]], align 8
-; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, i64* [[GEP2_1]], align 8
+; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds i64, ptr [[A1]], i64 [[IV_NEXT]]
+; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds i64, ptr [[A2]], i64 [[IV_NEXT]]
+; CHECK-NEXT: [[LOAD1_1:%.*]] = load i64, ptr [[GEP1_1]], align 8
+; CHECK-NEXT: [[LOAD2_1:%.*]] = load i64, ptr [[GEP2_1]], align 8
; CHECK-NEXT: [[EXITCOND2_1:%.*]] = icmp eq i64 [[LOAD1_1]], [[LOAD2_1]]
; CHECK-NEXT: br i1 [[EXITCOND2_1]], label [[LOOP_2:%.*]], label [[EXIT]]
; CHECK: loop.2:
; CHECK-NEXT: br label [[LATCH_2:%.*]]
; CHECK: latch.2:
; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV_NEXT_1]], 1
-; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr inbounds i64, i64* [[A1]], i64 [[IV_NEXT_1]]
-; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr inbounds i64, i64* [[A2]], i64 [[IV_NEXT_1]]
-; CHECK-NEXT: [[LOAD1_2:%.*]] = load i64, i64* [[GEP1_2]], align 8
-; CHECK-NEXT: [[LOAD2_2:%.*]] = load i64, i64* [[GEP2_2]], align 8
+; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr inbounds i64, ptr [[A1]], i64 [[IV_NEXT_1]]
+; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr inbounds i64, ptr [[A2]], i64 [[IV_NEXT_1]]
+; CHECK-NEXT: [[LOAD1_2:%.*]] = load i64, ptr [[GEP1_2]], align 8
+; CHECK-NEXT: [[LOAD2_2:%.*]] = load i64, ptr [[GEP2_2]], align 8
; CHECK-NEXT: [[EXITCOND2_2:%.*]] = icmp eq i64 [[LOAD1_2]], [[LOAD2_2]]
; CHECK-NEXT: br i1 [[EXITCOND2_2]], label [[LOOP_3:%.*]], label [[EXIT]]
; CHECK: loop.3:
; CHECK-NEXT: br label [[LATCH_3:%.*]]
; CHECK: latch.3:
; CHECK-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV_NEXT_2]], 1
-; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr inbounds i64, i64* [[A1]], i64 [[IV_NEXT_2]]
-; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr inbounds i64, i64* [[A2]], i64 [[IV_NEXT_2]]
-; CHECK-NEXT: [[LOAD1_3:%.*]] = load i64, i64* [[GEP1_3]], align 8
-; CHECK-NEXT: [[LOAD2_3:%.*]] = load i64, i64* [[GEP2_3]], align 8
+; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr inbounds i64, ptr [[A1]], i64 [[IV_NEXT_2]]
+; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr inbounds i64, ptr [[A2]], i64 [[IV_NEXT_2]]
+; CHECK-NEXT: [[LOAD1_3:%.*]] = load i64, ptr [[GEP1_3]], align 8
+; CHECK-NEXT: [[LOAD2_3:%.*]] = load i64, ptr [[GEP2_3]], align 8
; CHECK-NEXT: [[EXITCOND2_3:%.*]] = icmp eq i64 [[LOAD1_3]], [[LOAD2_3]]
; CHECK-NEXT: br i1 [[EXITCOND2_3]], label [[LOOP_4:%.*]], label [[EXIT]]
; CHECK: loop.4:
; CHECK-NEXT: br i1 [[EXITCOND_4]], label [[EXIT]], label [[LATCH_4]]
; CHECK: latch.4:
; CHECK-NEXT: [[IV_NEXT_4]] = add nuw nsw i64 [[IV_NEXT_3]], 1
-; CHECK-NEXT: [[GEP1_4:%.*]] = getelementptr inbounds i64, i64* [[A1]], i64 [[IV_NEXT_3]]
-; CHECK-NEXT: [[GEP2_4:%.*]] = getelementptr inbounds i64, i64* [[A2]], i64 [[IV_NEXT_3]]
-; CHECK-NEXT: [[LOAD1_4:%.*]] = load i64, i64* [[GEP1_4]], align 8
-; CHECK-NEXT: [[LOAD2_4:%.*]] = load i64, i64* [[GEP2_4]], align 8
+; CHECK-NEXT: [[GEP1_4:%.*]] = getelementptr inbounds i64, ptr [[A1]], i64 [[IV_NEXT_3]]
+; CHECK-NEXT: [[GEP2_4:%.*]] = getelementptr inbounds i64, ptr [[A2]], i64 [[IV_NEXT_3]]
+; CHECK-NEXT: [[LOAD1_4:%.*]] = load i64, ptr [[GEP1_4]], align 8
+; CHECK-NEXT: [[LOAD2_4:%.*]] = load i64, ptr [[GEP2_4]], align 8
; CHECK-NEXT: [[EXITCOND2_4:%.*]] = icmp eq i64 [[LOAD1_4]], [[LOAD2_4]]
; CHECK-NEXT: br i1 [[EXITCOND2_4]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
latch:
%iv.next = add nuw nsw i64 %iv, 1
- %gep1 = getelementptr inbounds i64, i64* %a1, i64 %iv
- %gep2 = getelementptr inbounds i64, i64* %a2, i64 %iv
- %load1 = load i64, i64* %gep1, align 8
- %load2 = load i64, i64* %gep2, align 8
+ %gep1 = getelementptr inbounds i64, ptr %a1, i64 %iv
+ %gep2 = getelementptr inbounds i64, ptr %a2, i64 %iv
+ %load1 = load i64, ptr %gep1, align 8
+ %load2 = load i64, ptr %gep2, align 8
%exitcond2 = icmp eq i64 %load1, %load2
br i1 %exitcond2, label %loop, label %exit
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT_3:%.*]], [[FOR_LATCH_3]] ]
; CHECK-NEXT: [[RED_NEXT:%.*]] = add nuw nsw i32 10, [[RED]]
; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 2
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [344 x i32], [344 x i32]* @table, i64 0, i64 [[IV_NEXT]]
-; CHECK-NEXT: store i32 [[RED_NEXT]], i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [344 x i32], ptr @table, i64 0, i64 [[IV_NEXT]]
+; CHECK-NEXT: store i32 [[RED_NEXT]], ptr [[PTR]], align 4
; CHECK-NEXT: br label [[FOR_LATCH:%.*]]
; CHECK: for.latch:
; CHECK-NEXT: [[RED_NEXT_1:%.*]] = add nuw nsw i32 10, [[RED_NEXT]]
; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV_NEXT]], 2
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds [344 x i32], [344 x i32]* @table, i64 0, i64 [[IV_NEXT_1]]
-; CHECK-NEXT: store i32 [[RED_NEXT_1]], i32* [[PTR_1]], align 4
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds [344 x i32], ptr @table, i64 0, i64 [[IV_NEXT_1]]
+; CHECK-NEXT: store i32 [[RED_NEXT_1]], ptr [[PTR_1]], align 4
; CHECK-NEXT: br label [[FOR_LATCH_1:%.*]]
; CHECK: for.latch.1:
; CHECK-NEXT: [[RED_NEXT_2:%.*]] = add nuw nsw i32 10, [[RED_NEXT_1]]
; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV_NEXT_1]], 2
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds [344 x i32], [344 x i32]* @table, i64 0, i64 [[IV_NEXT_2]]
-; CHECK-NEXT: store i32 [[RED_NEXT_2]], i32* [[PTR_2]], align 4
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds [344 x i32], ptr @table, i64 0, i64 [[IV_NEXT_2]]
+; CHECK-NEXT: store i32 [[RED_NEXT_2]], ptr [[PTR_2]], align 4
; CHECK-NEXT: br label [[FOR_LATCH_2:%.*]]
; CHECK: for.latch.2:
; CHECK-NEXT: [[RED_NEXT_3]] = add nuw nsw i32 10, [[RED_NEXT_2]]
; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV_NEXT_2]], 2
-; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds [344 x i32], [344 x i32]* @table, i64 0, i64 [[IV_NEXT_3]]
-; CHECK-NEXT: store i32 [[RED_NEXT_3]], i32* [[PTR_3]], align 4
+; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds [344 x i32], ptr @table, i64 0, i64 [[IV_NEXT_3]]
+; CHECK-NEXT: store i32 [[RED_NEXT_3]], ptr [[PTR_3]], align 4
; CHECK-NEXT: [[EXITCOND_1_I_3:%.*]] = icmp eq i64 [[IV_NEXT_3]], 344
; CHECK-NEXT: br i1 [[EXITCOND_1_I_3]], label [[EXIT:%.*]], label [[FOR_LATCH_3]]
; CHECK: for.latch.3:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.latch ]
%red.next = add i32 10, %red
%iv.next = add nuw nsw i64 %iv, 2
- %ptr = getelementptr inbounds [344 x i32], [344 x i32]* @table, i64 0, i64 %iv.next
- store i32 %red.next, i32* %ptr, align 4
+ %ptr = getelementptr inbounds [344 x i32], ptr @table, i64 0, i64 %iv.next
+ store i32 %red.next, ptr %ptr, align 4
%exitcond.1.i = icmp eq i64 %iv.next, 344
br i1 %exitcond.1.i, label %exit, label %for.latch
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 8
br i1 %exitcond, label %for.body, label %for.exit
ret void
}
-define void @test17() personality i8* undef{
+define void @test17() personality ptr undef{
; CHECK-LABEL: @test17(
; CHECK-NEXT: body:
; CHECK-NEXT: br label [[LOOP_PEEL_BEGIN:%.*]]
; CHECK-NEXT: invoke void @f1()
; CHECK-NEXT: to label [[LOOP]] unwind label [[EH_UNW_LOOPEXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: eh.Unw.loopexit.loopexit:
-; CHECK-NEXT: [[LPAD_LOOPEXIT2:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: catch i8* null
+; CHECK-NEXT: [[LPAD_LOOPEXIT2:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr null
; CHECK-NEXT: br label [[EH_UNW_LOOPEXIT:%.*]]
; CHECK: eh.Unw.loopexit.loopexit.split-lp:
-; CHECK-NEXT: [[LPAD_LOOPEXIT_SPLIT_LP:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: catch i8* null
+; CHECK-NEXT: [[LPAD_LOOPEXIT_SPLIT_LP:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr null
; CHECK-NEXT: br label [[EH_UNW_LOOPEXIT]]
; CHECK: eh.Unw.loopexit:
; CHECK-NEXT: ret void
to label %loop unwind label %eh.Unw.loopexit
eh.Unw.loopexit:
- %lpad.loopexit = landingpad { i8*, i32 }
- catch i8* null
+ %lpad.loopexit = landingpad { ptr, i32 }
+ catch ptr null
ret void
}
; Testcase reduced from PR48812.
-define void @test18(i32* %p) {
+define void @test18(ptr %p) {
; CHECK-LABEL: @test18(
; CHECK-NEXT: init:
; CHECK-NEXT: br label [[LOOP_PEEL_BEGIN:%.*]]
; CHECK: loop.peel:
; CHECK-NEXT: br label [[LATCH_PEEL:%.*]]
; CHECK: latch.peel:
-; CHECK-NEXT: [[CONTROL_PEEL:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[CONTROL_PEEL:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: switch i32 [[CONTROL_PEEL]], label [[EXIT:%.*]] [
; CHECK-NEXT: i32 2, label [[LOOP_PEEL_NEXT:%.*]]
; CHECK-NEXT: ]
; CHECK: loop:
; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[CONTROL:%.*]] = load volatile i32, i32* [[P]], align 4
+; CHECK-NEXT: [[CONTROL:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT: switch i32 [[CONTROL]], label [[EXIT_LOOPEXIT:%.*]] [
; CHECK-NEXT: i32 2, label [[LOOP]]
; CHECK-NEXT: ], !llvm.loop [[LOOP14:![0-9]+]]
br label %latch
latch:
- %control = load volatile i32, i32* %p
+ %control = load volatile i32, ptr %p
switch i32 %control, label %exit [
i32 2, label %loop
]
; Loop peeling must result in valid scope declartions
-define internal fastcc void @test01(i8* %p0, i8* %p1, i8* %p2) unnamed_addr align 2 {
+define internal fastcc void @test01(ptr %p0, ptr %p1, ptr %p2) unnamed_addr align 2 {
; CHECK-LABEL: @test01(
; CHECK-NEXT: for.body47.lr.ph:
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
; CHECK-NEXT: br label [[FOR_BODY47_PEEL:%.*]]
; CHECK: for.body47.peel:
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !3)
-; CHECK-NEXT: store i8 42, i8* [[P0:%.*]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 43, i8* [[P1:%.*]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 44, i8* [[P2:%.*]], align 1, !alias.scope !5
-; CHECK-NEXT: store i8 42, i8* [[P0]], align 1, !noalias !3
-; CHECK-NEXT: store i8 43, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 44, i8* [[P2]], align 1, !noalias !5
+; CHECK-NEXT: store i8 42, ptr [[P0:%.*]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 43, ptr [[P1:%.*]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 44, ptr [[P2:%.*]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 42, ptr [[P0]], align 1, !noalias !3
+; CHECK-NEXT: store i8 43, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 44, ptr [[P2]], align 1, !noalias !5
; CHECK-NEXT: [[CMP52_PEEL:%.*]] = icmp eq i32 0, 0
; CHECK-NEXT: br i1 [[CMP52_PEEL]], label [[COND_TRUE_PEEL:%.*]], label [[COND_END_PEEL:%.*]]
; CHECK: cond.true.peel:
-; CHECK-NEXT: store i8 52, i8* [[P0]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 53, i8* [[P1]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 54, i8* [[P2]], align 1, !alias.scope !5
-; CHECK-NEXT: store i8 52, i8* [[P0]], align 1, !noalias !3
-; CHECK-NEXT: store i8 53, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 54, i8* [[P2]], align 1, !noalias !5
+; CHECK-NEXT: store i8 52, ptr [[P0]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 53, ptr [[P1]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 54, ptr [[P2]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 52, ptr [[P0]], align 1, !noalias !3
+; CHECK-NEXT: store i8 53, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 54, ptr [[P2]], align 1, !noalias !5
; CHECK-NEXT: br label [[COND_END_PEEL]]
; CHECK: cond.end.peel:
-; CHECK-NEXT: store i8 62, i8* [[P0]], align 1, !alias.scope !3
-; CHECK-NEXT: store i8 63, i8* [[P1]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 64, i8* [[P2]], align 1, !alias.scope !5
-; CHECK-NEXT: store i8 62, i8* [[P0]], align 1, !noalias !3
-; CHECK-NEXT: store i8 63, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 64, i8* [[P2]], align 1, !noalias !5
+; CHECK-NEXT: store i8 62, ptr [[P0]], align 1, !alias.scope !3
+; CHECK-NEXT: store i8 63, ptr [[P1]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 64, ptr [[P2]], align 1, !alias.scope !5
+; CHECK-NEXT: store i8 62, ptr [[P0]], align 1, !noalias !3
+; CHECK-NEXT: store i8 63, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 64, ptr [[P2]], align 1, !noalias !5
; CHECK-NEXT: [[INC_PEEL:%.*]] = add nuw i32 0, 1
; CHECK-NEXT: [[EXITCOND_NOT_PEEL:%.*]] = icmp eq i32 [[INC_PEEL]], undef
; CHECK-NEXT: br i1 [[EXITCOND_NOT_PEEL]], label [[FOR_COND_CLEANUP46:%.*]], label [[FOR_BODY47_PEEL_NEXT:%.*]]
; CHECK: for.body47:
; CHECK-NEXT: [[J_02:%.*]] = phi i32 [ [[INC_PEEL]], [[FOR_BODY47_LR_PH_PEEL_NEWPH]] ], [ [[INC:%.*]], [[COND_END:%.*]] ]
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !6)
-; CHECK-NEXT: store i8 42, i8* [[P0]], align 1, !alias.scope !6
-; CHECK-NEXT: store i8 43, i8* [[P1]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 44, i8* [[P2]], align 1, !alias.scope !8
-; CHECK-NEXT: store i8 42, i8* [[P0]], align 1, !noalias !6
-; CHECK-NEXT: store i8 43, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 44, i8* [[P2]], align 1, !noalias !8
+; CHECK-NEXT: store i8 42, ptr [[P0]], align 1, !alias.scope !6
+; CHECK-NEXT: store i8 43, ptr [[P1]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 44, ptr [[P2]], align 1, !alias.scope !8
+; CHECK-NEXT: store i8 42, ptr [[P0]], align 1, !noalias !6
+; CHECK-NEXT: store i8 43, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 44, ptr [[P2]], align 1, !noalias !8
; CHECK-NEXT: br i1 false, label [[COND_TRUE:%.*]], label [[COND_END]]
; CHECK: cond.true:
-; CHECK-NEXT: store i8 52, i8* [[P0]], align 1, !alias.scope !6
-; CHECK-NEXT: store i8 53, i8* [[P1]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 54, i8* [[P2]], align 1, !alias.scope !8
-; CHECK-NEXT: store i8 52, i8* [[P0]], align 1, !noalias !6
-; CHECK-NEXT: store i8 53, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 54, i8* [[P2]], align 1, !noalias !8
+; CHECK-NEXT: store i8 52, ptr [[P0]], align 1, !alias.scope !6
+; CHECK-NEXT: store i8 53, ptr [[P1]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 54, ptr [[P2]], align 1, !alias.scope !8
+; CHECK-NEXT: store i8 52, ptr [[P0]], align 1, !noalias !6
+; CHECK-NEXT: store i8 53, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 54, ptr [[P2]], align 1, !noalias !8
; CHECK-NEXT: br label [[COND_END]]
; CHECK: cond.end:
-; CHECK-NEXT: store i8 62, i8* [[P0]], align 1, !alias.scope !6
-; CHECK-NEXT: store i8 63, i8* [[P1]], align 1, !alias.scope !0
-; CHECK-NEXT: store i8 64, i8* [[P2]], align 1, !alias.scope !8
-; CHECK-NEXT: store i8 62, i8* [[P0]], align 1, !noalias !6
-; CHECK-NEXT: store i8 63, i8* [[P1]], align 1, !noalias !0
-; CHECK-NEXT: store i8 64, i8* [[P2]], align 1, !noalias !8
+; CHECK-NEXT: store i8 62, ptr [[P0]], align 1, !alias.scope !6
+; CHECK-NEXT: store i8 63, ptr [[P1]], align 1, !alias.scope !0
+; CHECK-NEXT: store i8 64, ptr [[P2]], align 1, !alias.scope !8
+; CHECK-NEXT: store i8 62, ptr [[P0]], align 1, !noalias !6
+; CHECK-NEXT: store i8 63, ptr [[P1]], align 1, !noalias !0
+; CHECK-NEXT: store i8 64, ptr [[P2]], align 1, !noalias !8
; CHECK-NEXT: [[INC]] = add nuw i32 [[J_02]], 1
; CHECK-NEXT: br i1 undef, label [[FOR_COND_CLEANUP46_LOOPEXIT:%.*]], label [[FOR_BODY47]], [[LOOP9:!llvm.loop !.*]]
;
for.body47: ; preds = %cond.end, %for.body47.lr.ph
%j.02 = phi i32 [ 0, %for.body47.lr.ph ], [ %inc, %cond.end ]
call void @llvm.experimental.noalias.scope.decl(metadata !0)
- store i8 42, i8* %p0, !alias.scope !0
- store i8 43, i8* %p1, !alias.scope !5
- store i8 44, i8* %p2, !alias.scope !7
- store i8 42, i8* %p0, !noalias !0
- store i8 43, i8* %p1, !noalias !5
- store i8 44, i8* %p2, !noalias !7
+ store i8 42, ptr %p0, !alias.scope !0
+ store i8 43, ptr %p1, !alias.scope !5
+ store i8 44, ptr %p2, !alias.scope !7
+ store i8 42, ptr %p0, !noalias !0
+ store i8 43, ptr %p1, !noalias !5
+ store i8 44, ptr %p2, !noalias !7
%cmp52 = icmp eq i32 %j.02, 0
br i1 %cmp52, label %cond.true, label %cond.end
cond.true: ; preds = %for.body47
- store i8 52, i8* %p0, !alias.scope !0
- store i8 53, i8* %p1, !alias.scope !5
- store i8 54, i8* %p2, !alias.scope !7
- store i8 52, i8* %p0, !noalias !0
- store i8 53, i8* %p1, !noalias !5
- store i8 54, i8* %p2, !noalias !7
+ store i8 52, ptr %p0, !alias.scope !0
+ store i8 53, ptr %p1, !alias.scope !5
+ store i8 54, ptr %p2, !alias.scope !7
+ store i8 52, ptr %p0, !noalias !0
+ store i8 53, ptr %p1, !noalias !5
+ store i8 54, ptr %p2, !noalias !7
br label %cond.end
cond.end: ; preds = %cond.true, %for.body47
- store i8 62, i8* %p0, !alias.scope !0
- store i8 63, i8* %p1, !alias.scope !5
- store i8 64, i8* %p2, !alias.scope !7
- store i8 62, i8* %p0, !noalias !0
- store i8 63, i8* %p1, !noalias !5
- store i8 64, i8* %p2, !noalias !7
+ store i8 62, ptr %p0, !alias.scope !0
+ store i8 63, ptr %p1, !alias.scope !5
+ store i8 64, ptr %p2, !alias.scope !7
+ store i8 62, ptr %p0, !noalias !0
+ store i8 63, ptr %p1, !noalias !5
+ store i8 64, ptr %p2, !noalias !7
%inc = add nuw i32 %j.02, 1
%exitcond.not = icmp eq i32 %inc, undef
br i1 %exitcond.not, label %for.cond.cleanup46, label %for.body47, !llvm.loop !3
; CHECK: Loop Unroll: F[basic]
; CHECK: PEELING loop %for.body with iteration count 2!
-define i32 @basic(i32* %p, i32 %k, i1 %c1, i1 %c2) #0 !prof !3 {
+define i32 @basic(ptr %p, i32 %k, i1 %c1, i1 %c2) #0 !prof !3 {
entry:
br label %for.body
for.body:
%i.05 = phi i32 [ 0, %entry ], [ %inc, %latch ]
- %p.addr.04 = phi i32* [ %p, %entry ], [ %incdec.ptr, %latch ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %entry ], [ %incdec.ptr, %latch ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %c1, label %left, label %right
; CHECK: Loop Unroll: F[basic]
; CHECK: PEELING loop %for.body with iteration count 2!
-define i32 @basic(i32* %p, i32 %k, i1 %c1, i1 %c2) #0 !prof !3 {
+define i32 @basic(ptr %p, i32 %k, i1 %c1, i1 %c2) #0 !prof !3 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %latch ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %latch ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %latch ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %c1, label %continue, label %to_side_exit
; CHECK: br i1 %c, label %{{.*}}, label %side_exit.loopexit, !prof !15
; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !19
-define i32 @basic(i32* %p, i32 %k, i1 %c) #0 !prof !15 {
+define i32 @basic(ptr %p, i32 %k, i1 %c) #0 !prof !15 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %continue ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %continue ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %continue ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %c, label %continue, label %side_exit, !prof !17
; CHECK: [[NEXT2]]:
; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !18
-define void @basic(i32* %p, i32 %k) #0 !prof !15 {
+define void @basic(ptr %p, i32 %k) #0 !prof !15 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !prof !16
; CHECK: for.body:
; CHECK-NOT: br
; CHECK: br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
-define void @optsize(i32* %p, i32 %k) #1 !prof !15 {
+define void @optsize(ptr %p, i32 %k) #1 !prof !15 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !prof !16
; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
; CHECK: [[NEXT0]]:
-; CHECK: store i32 0, i32* %p, align 4
+; CHECK: store i32 0, ptr %p, align 4
; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:[^,]*]]
; Verify that MD_loop metadata is dropped.
; CHECK-NOT: , !llvm.loop !{{[0-9]*}}
; CHECK: [[NEXT1]]:
-; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
-; CHECK: store i32 1, i32* %[[INC1]], align 4
+; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, ptr %p, i64 1
+; CHECK: store i32 1, ptr %[[INC1]], align 4
; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
; Verify that MD_loop metadata is dropped.
; CHECK-NOT: , !llvm.loop !{{[0-9]*}}
; CHECK: [[NEXT2]]:
-; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
-; CHECK: store i32 2, i32* %[[INC2]], align 4
+; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, ptr %p, i64 2
+; CHECK: store i32 2, ptr %[[INC2]], align 4
; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP_PH:[^,]*]]
; Verify that MD_loop metadata is dropped.
; CHECK: for.end:
; CHECK: ret void
-define void @basic(i32* %p, i32 %k) #0 {
+define void @basic(ptr %p, i32 %k) #0 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !llvm.loop !1
; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
; CHECK: [[NEXT0]]:
-; CHECK: store i32 0, i32* %p, align 4
+; CHECK: store i32 0, ptr %p, align 4
; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:[^,]*]]
; Verify that MD_loop metadata is dropped.
; CHECK-NOT: , !llvm.loop !{{[0-9]*}}
; CHECK: [[NEXT1]]:
-; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
-; CHECK: store i32 1, i32* %[[INC1]], align 4
+; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, ptr %p, i64 1
+; CHECK: store i32 1, ptr %[[INC1]], align 4
; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
; Verify that MD_loop metadata is dropped.
; CHECK-NOT: , !llvm.loop !{{[0-9]*}}
; CHECK: [[NEXT2]]:
-; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
-; CHECK: store i32 2, i32* %[[INC2]], align 4
+; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, ptr %p, i64 2
+; CHECK: store i32 2, ptr %[[INC2]], align 4
; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP_PH:[^,]*]]
; Verify that MD_loop metadata is dropped.
; CHECK: for.end:
; CHECK: %ret = phi i32 [ 0, %entry ], [ 1, %[[NEXT0]] ], [ 2, %[[NEXT1]] ], [ 3, %[[NEXT2]] ], [ %inc, %for.body ]
; CHECK: ret i32 %ret
-define i32 @output(i32* %p, i32 %k) #0 {
+define i32 @output(ptr %p, i32 %k) #0 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
- store i32 %i.05, i32* %p.addr.04, align 4
+ %p.addr.04 = phi ptr [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.04, i32 1
+ store i32 %i.05, ptr %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !llvm.loop !2
declare void @funcb()
@Comma = external global i8
-define void @funca(i8* readnone %b, i8* readnone %e) {
+define void @funca(ptr readnone %b, ptr readnone %e) {
entry:
- %cmp2 = icmp eq i8* %b, %e
+ %cmp2 = icmp eq ptr %b, %e
br i1 %cmp2, label %for.end, label %for.body.preheader
for.body.preheader:
br label %for.body
for.body:
- %b.addr.03 = phi i8* [ %incdec.ptr, %for.inc ], [ %b, %for.body.preheader ]
- %0 = load i8, i8* @Comma
+ %b.addr.03 = phi ptr [ %incdec.ptr, %for.inc ], [ %b, %for.body.preheader ]
+ %0 = load i8, ptr @Comma
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then:
tail call void @funcb()
- store i8 1, i8* @Comma
+ store i8 1, ptr @Comma
br label %for.inc
for.inc:
- %incdec.ptr = getelementptr inbounds i8, i8* %b.addr.03, i64 1
- %cmp = icmp eq i8* %incdec.ptr, %e
+ %incdec.ptr = getelementptr inbounds i8, ptr %b.addr.03, i64 1
+ %cmp = icmp eq ptr %incdec.ptr, %e
br i1 %cmp, label %for.end, label %for.body
for.end:
; CHECK_LABEL: @funca
; Peeled iteration
-; CHECK: %[[REG1:[0-9]+]] = load i8, i8* @Comma
+; CHECK: %[[REG1:[0-9]+]] = load i8, ptr @Comma
; CHECK: %[[REG2:.*]] = icmp eq i8 %[[REG1]], 0
; CHECK: br i1 %[[REG2]], label %{{.*}}, label %[[IFTHEN:.*]]
; CHECK: [[IFTHEN]]:
; CHECK: call void @funcb()
-; CHECK: store i8 1, i8* @Comma
+; CHECK: store i8 1, ptr @Comma
; CHECK: br label %[[FORINC]]
; CHECK: [[FORINC]]:
-; CHECK: %[[REG3:.*]] = getelementptr inbounds i8, i8* %b, i64 1
-; CHECK: %[[REG4:.*]] = icmp eq i8* %[[REG3]], %e
+; CHECK: %[[REG3:.*]] = getelementptr inbounds i8, ptr %b, i64 1
+; CHECK: %[[REG4:.*]] = icmp eq ptr %[[REG3]], %e
; CHECK: br i1 %[[REG4]]
; main body
-; CHECK: %[[REG1b:.*]] = load i8, i8* @Comma
+; CHECK: %[[REG1b:.*]] = load i8, ptr @Comma
; CHECK: %[[REG2b:.*]] = icmp eq i8 %[[REG1b]], 0
; CHECK: br i1 %[[REG2b]], label %{{.*}}, label %[[IFTHENb:.*]]
; CHECK: [[IFTHENb]]:
; CHECK: call void @funcb()
-; CHECK: store i8 1, i8* @Comma
+; CHECK: store i8 1, ptr @Comma
; CHECK: br label %[[FORINCb]]
; CHECK: [[FORINCb]]:
-; CHECK: %[[REG3b:.*]] = getelementptr inbounds i8, i8* %b, i64 1
-; CHECK: %[[REG4b:.*]] = icmp eq i8* %[[REG3b]], %e
+; CHECK: %[[REG3b:.*]] = getelementptr inbounds i8, ptr %b, i64 1
+; CHECK: %[[REG4b:.*]] = icmp eq ptr %[[REG3b]], %e
; CHECK: br i1 %[[REG4b]]
declare void @foo()
-define void @peel_unreachable_exit_and_latch_exit(i32* %ptr, i32 %N, i32 %x) {
+define void @peel_unreachable_exit_and_latch_exit(ptr %ptr, i32 %N, i32 %x) {
; CHECK-LABEL: @peel_unreachable_exit_and_latch_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER_PEEL_BEGIN:%.*]]
; CHECK-NEXT: br label [[LOOP_LATCH_PEEL]]
; CHECK: loop.latch.peel:
; CHECK-NEXT: [[M_PEEL:%.*]] = phi i32 [ 0, [[THEN_PEEL]] ], [ [[X]], [[ELSE_PEEL]] ]
-; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 1
-; CHECK-NEXT: store i32 [[M_PEEL]], i32* [[GEP_PEEL]], align 4
+; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT: store i32 [[M_PEEL]], ptr [[GEP_PEEL]], align 4
; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add nuw nsw i32 1, 1
; CHECK-NEXT: [[C_3_PEEL:%.*]] = icmp ult i32 1, 1000
; CHECK-NEXT: br i1 [[C_3_PEEL]], label [[LOOP_HEADER_PEEL_NEXT:%.*]], label [[EXIT:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[UNREACHABLE_EXIT_LOOPEXIT:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[M:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[X]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[M]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[IV]]
+; CHECK-NEXT: store i32 [[M]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_HEADER]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
loop.latch:
%m = phi i32 [ 0, %then ], [ %x, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %m, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %m, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
br i1 %c.3, label %loop.header, label %exit
unreachable
}
-define void @peel_unreachable_exit_and_header_exit(i32* %ptr, i32 %N, i32 %x) {
+define void @peel_unreachable_exit_and_header_exit(ptr %ptr, i32 %N, i32 %x) {
; CHECK-LABEL: @peel_unreachable_exit_and_header_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[C_2:%.*]] = icmp eq i32 1, [[X:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[UNREACHABLE_EXIT:%.*]], label [[LOOP_LATCH:%.*]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 1
-; CHECK-NEXT: store i32 [[X]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT: store i32 [[X]], ptr [[GEP]], align 4
; CHECK-NEXT: unreachable
; CHECK: exit:
; CHECK-NEXT: ret void
br i1 %c.2, label %unreachable.exit, label %loop.latch
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %x, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %x, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
br label %loop.header
unreachable
}
-define void @peel_unreachable_and_multiple_reachable_exits(i32* %ptr, i32 %N, i32 %x) {
+define void @peel_unreachable_and_multiple_reachable_exits(ptr %ptr, i32 %N, i32 %x) {
; CHECK-LABEL: @peel_unreachable_and_multiple_reachable_exits(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER_PEEL_BEGIN:%.*]]
; CHECK-NEXT: br i1 [[C_2_PEEL]], label [[EXIT:%.*]], label [[LOOP_LATCH_PEEL]]
; CHECK: loop.latch.peel:
; CHECK-NEXT: [[M_PEEL:%.*]] = phi i32 [ 0, [[THEN_PEEL]] ], [ [[X]], [[ELSE_PEEL]] ]
-; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 1
-; CHECK-NEXT: store i32 [[M_PEEL]], i32* [[GEP_PEEL]], align 4
+; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT: store i32 [[M_PEEL]], ptr [[GEP_PEEL]], align 4
; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add nuw nsw i32 1, 1
; CHECK-NEXT: [[C_4_PEEL:%.*]] = icmp ult i32 1, 1000
; CHECK-NEXT: br i1 [[C_4_PEEL]], label [[LOOP_HEADER_PEEL_NEXT:%.*]], label [[EXIT]]
; CHECK-NEXT: br i1 [[C_3]], label [[UNREACHABLE_EXIT_LOOPEXIT:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[M:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[X]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[M]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[IV]]
+; CHECK-NEXT: store i32 [[M]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_4:%.*]] = icmp ult i32 [[IV]], 1000
; CHECK-NEXT: br i1 [[C_4]], label [[LOOP_HEADER]], label [[EXIT_LOOPEXIT]], !llvm.loop [[LOOP2:![0-9]+]]
loop.latch:
%m = phi i32 [ 0, %then ], [ %x, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %m, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %m, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
%c.4 = icmp ult i32 %iv, 1000
br i1 %c.4, label %loop.header, label %exit
unreachable
}
-define void @peel_exits_to_blocks_branch_to_unreachable_block(i32* %ptr, i32 %N, i32 %x, i1 %c.1) {
+define void @peel_exits_to_blocks_branch_to_unreachable_block(ptr %ptr, i32 %N, i32 %x, i1 %c.1) {
; CHECK-LABEL: @peel_exits_to_blocks_branch_to_unreachable_block(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER_PEEL_BEGIN:%.*]]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[EXIT_1:%.*]], label [[LOOP_LATCH_PEEL]]
; CHECK: loop.latch.peel:
; CHECK-NEXT: [[M_PEEL:%.*]] = phi i32 [ 0, [[THEN_PEEL]] ], [ [[X]], [[ELSE_PEEL]] ]
-; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 1
-; CHECK-NEXT: store i32 [[M_PEEL]], i32* [[GEP_PEEL]], align 4
+; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT: store i32 [[M_PEEL]], ptr [[GEP_PEEL]], align 4
; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add nuw nsw i32 1, 1
; CHECK-NEXT: [[C_3_PEEL:%.*]] = icmp ult i32 1, 1000
; CHECK-NEXT: br i1 [[C_3_PEEL]], label [[LOOP_HEADER_PEEL_NEXT:%.*]], label [[EXIT:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[EXIT_2_LOOPEXIT:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[M:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[X]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[M]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[IV]]
+; CHECK-NEXT: store i32 [[M]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_HEADER]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
loop.latch:
%m = phi i32 [ 0, %then ], [ %x, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %m, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %m, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
br i1 %c.3, label %loop.header, label %exit
unreachable
}
-define void @peel_exits_to_blocks_branch_to_unreachable_block_with_invariant_load(i32* %ptr, i32 %N, i32 %x, i1 %c.1, i32 %y, i32* %size_ptr) {
+define void @peel_exits_to_blocks_branch_to_unreachable_block_with_invariant_load(ptr %ptr, i32 %N, i32 %x, i1 %c.1, i32 %y, ptr %size_ptr) {
; CHECK-LABEL: @peel_exits_to_blocks_branch_to_unreachable_block_with_invariant_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[EXIT_2:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[M:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[X]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[M]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: store i32 [[M]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[SIZE:%.*]] = load i32, i32* [[SIZE_PTR:%.*]], align 4
+; CHECK-NEXT: [[SIZE:%.*]] = load i32, ptr [[SIZE_PTR:%.*]], align 4
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_HEADER]], label [[EXIT:%.*]]
; CHECK: exit:
loop.latch:
%m = phi i32 [ 0, %then ], [ %x, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %m, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %m, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
- %size = load i32, i32* %size_ptr, align 4
+ %size = load i32, ptr %size_ptr, align 4
%c.3 = icmp ult i32 %iv.next, %size
br i1 %c.3, label %loop.header, label %exit
unreachable
}
-define void @peel_exits_to_blocks_branch_to_unreachable_block_with_profile(i32* %ptr, i32 %N, i32 %x, i1 %c.1) !prof !0 {
+define void @peel_exits_to_blocks_branch_to_unreachable_block_with_profile(ptr %ptr, i32 %N, i32 %x, i1 %c.1) !prof !0 {
; CHECK-LABEL: @peel_exits_to_blocks_branch_to_unreachable_block_with_profile(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[EXIT_2:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[M:%.*]] = phi i32 [ 0, [[THEN]] ], [ [[X]], [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[M]], i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: store i32 [[M]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_HEADER]], label [[EXIT:%.*]], !prof [[PROF5]]
loop.latch:
%m = phi i32 [ 0, %then ], [ %x, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- store i32 %m, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ store i32 %m, ptr %gep
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv.next, %N
br i1 %c.3, label %loop.header, label %exit, !prof !2
declare void @foo()
-define i32 @peel_readonly_to_make_loads_derefenceable(i32* %ptr, i32 %N, i32* %inv, i1 %c.1) {
+define i32 @peel_readonly_to_make_loads_derefenceable(ptr %ptr, i32 %N, ptr %inv, i1 %c.1) {
; CHECK-LABEL: @peel_readonly_to_make_loads_derefenceable(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER_PEEL_BEGIN:%.*]]
; CHECK: loop.header.peel:
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN_PEEL:%.*]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: then.peel:
-; CHECK-NEXT: [[I_PEEL:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I_PEEL:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2_PEEL:%.*]] = icmp ult i32 [[I_PEEL]], 2
; CHECK-NEXT: br i1 [[C_2_PEEL]], label [[LOOP_LATCH_PEEL:%.*]], label [[UNREACHABLE_EXIT]]
; CHECK: loop.latch.peel:
-; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 1
-; CHECK-NEXT: [[LV_PEEL:%.*]] = load i32, i32* [[GEP_PEEL]], align 4
+; CHECK-NEXT: [[GEP_PEEL:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT: [[LV_PEEL:%.*]] = load i32, ptr [[GEP_PEEL]], align 4
; CHECK-NEXT: [[SUM_NEXT_PEEL:%.*]] = add i32 0, [[LV_PEEL]]
; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add nuw nsw i32 1, 1
; CHECK-NEXT: [[C_3_PEEL:%.*]] = icmp ult i32 1, 1000
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ [[SUM_NEXT_PEEL]], [[ENTRY_PEEL_NEWPH]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1]], label [[THEN:%.*]], label [[UNREACHABLE_EXIT_LOOPEXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT_LOOPEXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %unreachable.exit
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
unreachable
}
-define i32 @peel_readonly_to_make_loads_derefenceable_exits_lead_to_unreachable(i32* %ptr, i32 %N, i32* %inv, i1 %c.1) {
+define i32 @peel_readonly_to_make_loads_derefenceable_exits_lead_to_unreachable(ptr %ptr, i32 %N, ptr %inv, i1 %c.1) {
; CHECK-LABEL: @peel_readonly_to_make_loads_derefenceable_exits_lead_to_unreachable(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[EXIT_2:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[THEN_2:%.*]], label [[EXIT_2]]
; CHECK: then.2:
; CHECK-NEXT: [[C_4:%.*]] = icmp ult i32 [[I]], 4
; CHECK-NEXT: br i1 [[C_4]], label [[LOOP_LATCH]], label [[EXIT_3:%.*]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %exit.2
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %then.2, label %exit.2
br i1 %c.4, label %loop.latch, label %exit.3
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
unreachable
}
-define i32 @do_not_peel_readonly_load_in_header(i32* %ptr, i32 %N, i32* %inv, i1 %c.1) {
+define i32 @do_not_peel_readonly_load_in_header(ptr %ptr, i32 %N, ptr %inv, i1 %c.1) {
; CHECK-LABEL: @do_not_peel_readonly_load_in_header(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[THEN:%.*]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: then:
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
loop.header:
%iv = phi i32 [ 1, %entry ], [ %iv.next, %loop.latch ]
%sum = phi i32 [ 0, %entry ], [ %sum.next, %loop.latch ]
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %then, label %unreachable.exit
br i1 %c.1, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
unreachable
}
-define i32 @do_not_peel_readonly_but_wont_turn_dereferenceable(i32* %ptr, i32 %N, i32 %x, i32* %inv) {
+define i32 @do_not_peel_readonly_but_wont_turn_dereferenceable(ptr %ptr, i32 %N, i32 %x, ptr %inv) {
; CHECK-LABEL: @do_not_peel_readonly_but_wont_turn_dereferenceable(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i32 [[IV]], [[X:%.*]]
; CHECK-NEXT: br i1 [[C_1]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp eq i32 [[I]], 20
; CHECK-NEXT: br i1 [[C_2]], label [[UNREACHABLE_EXIT:%.*]], label [[LOOP_LATCH]]
; CHECK: else:
; CHECK-NEXT: br label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[I]], [[THEN]] ], [ 0, [[ELSE]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[LV]], [[P]]
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[ADD_1]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
br i1 %c.1, label %then, label %else
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp eq i32 %i, 20
br i1 %c.2, label %unreachable.exit, label %loop.latch
loop.latch:
%p = phi i32 [ %i, %then ], [ 0, %else ]
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%add.1 = add i32 %lv, %p
%sum.next = add i32 %sum, %add.1
%iv.next = add nuw nsw i32 %iv, 1
unreachable
}
-define i32 @do_not_peel_write1(i32* %ptr, i32 %N, i32 %x, i32* %inv, i32* %dst, i1 %c.1) {
+define i32 @do_not_peel_write1(ptr %ptr, i32 %N, i32 %x, ptr %inv, ptr %dst, i1 %c.1) {
; CHECK-LABEL: @do_not_peel_write1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
-; CHECK-NEXT: store i32 [[LV]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
+; CHECK-NEXT: store i32 [[LV]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %unreachable.exit
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
- store i32 %lv, i32* %dst
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
+ store i32 %lv, ptr %dst
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
unreachable
}
-define i32 @do_not_peel_write2(i32* %ptr, i32 %N, i32* %inv, i32* %dst) {
+define i32 @do_not_peel_write2(ptr %ptr, i32 %N, ptr %inv, ptr %dst) {
; CHECK-LABEL: @do_not_peel_write2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i32 [[I]], 20
; CHECK-NEXT: br i1 [[C_1]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: store i32 [[I]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: store i32 [[I]], ptr [[DST:%.*]], align 4
; CHECK-NEXT: br label [[LOOP_LATCH]]
; CHECK: else:
; CHECK-NEXT: br label [[UNREACHABLE_EXIT:%.*]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[LV]], [[I]]
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[ADD_1]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
loop.header:
%iv = phi i32 [ 1, %entry ], [ %iv.next, %loop.latch ]
%sum = phi i32 [ 0, %entry ], [ %sum.next, %loop.latch ]
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.1 = icmp eq i32 %i, 20
br i1 %c.1, label %then, label %else
then:
- store i32 %i, i32* %dst
+ store i32 %i, ptr %dst
br label %loop.latch
else:
br label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%add.1 = add i32 %lv, %i
%sum.next = add i32 %sum, %add.1
%iv.next = add nuw nsw i32 %iv, 1
declare i32 @llvm.experimental.deoptimize.i32(...)
-define i32 @peel_with_deopt_exit(i32* %ptr, i32 %N, i32* %inv, i1 %c.1) {
+define i32 @peel_with_deopt_exit(ptr %ptr, i32 %N, ptr %inv, i1 %c.1) {
; CHECK-LABEL: @peel_with_deopt_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[DEOPT_EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[DEOPT_EXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %deopt.exit
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %loop.latch, label %deopt.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
ret i32 %rval
}
-define i32 @do_not_peel_when_header_exiting(i32* %ptr, i32 %N, i32* %inv) {
+define i32 @do_not_peel_when_header_exiting(ptr %ptr, i32 %N, ptr %inv) {
; CHECK-LABEL: @do_not_peel_when_header_exiting(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[C_1:%.*]] = icmp ult i32 [[IV]], 1000
; CHECK-NEXT: br i1 [[C_1]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: br label [[LOOP_HEADER]]
br i1 %c.1, label %then, label %exit
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
br label %loop.header
unreachable
}
-define i32 @do_not_peel_readonly_to_make_loads_derefenceable_but_does_not_control_exit(i32* %ptr, i32 %N, i32* %inv, i1 %c.1, i32 %N.2) {
+define i32 @do_not_peel_readonly_to_make_loads_derefenceable_but_does_not_control_exit(ptr %ptr, i32 %N, ptr %inv, i1 %c.1, i32 %N.2) {
; CHECK-LABEL: @do_not_peel_readonly_to_make_loads_derefenceable_but_does_not_control_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[INV:%.*]], align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[INV:%.*]], align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[IV]], [[N_2:%.*]]
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %unreachable.exit
then:
- %i = load i32, i32* %inv
+ %i = load i32, ptr %inv
%c.2 = icmp ult i32 %iv, %N.2
br i1 %c.2, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
@glob = global i32 10
-define i32 @do_not_peel_readonly_but_already_deref_glob(i32* %ptr, i32 %N, i1 %c.1) {
+define i32 @do_not_peel_readonly_but_already_deref_glob(ptr %ptr, i32 %N, i1 %c.1) {
; CHECK-LABEL: @do_not_peel_readonly_but_already_deref_glob(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[UNREACHABLE_EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* @glob, align 4
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr @glob, align 4
; CHECK-NEXT: [[C_2:%.*]] = icmp ult i32 [[I]], 2
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[UNREACHABLE_EXIT]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[GEP]], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i32 [[IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[SUM_NEXT]] = add i32 [[SUM]], [[LV]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_3:%.*]] = icmp ult i32 [[IV]], 1000
br i1 %c.1, label %then, label %unreachable.exit
then:
- %i = load i32, i32* @glob
+ %i = load i32, ptr @glob
%c.2 = icmp ult i32 %i, 2
br i1 %c.2, label %loop.latch, label %unreachable.exit
loop.latch:
- %gep = getelementptr i32, i32* %ptr, i32 %iv
- %lv = load i32, i32* %gep
+ %gep = getelementptr i32, ptr %ptr, i32 %iv
+ %lv = load i32, ptr %gep
%sum.next = add i32 %sum, %lv
%iv.next = add nuw nsw i32 %iv, 1
%c.3 = icmp ult i32 %iv, 1000
pl_dolane.i970: ; preds = %pl_loop.i964
%storeval.i.i969 = extractelement <4 x i8> <i8 0, i8 1, i8 2, i8 3>, i32 %0
- store i8 %storeval.i.i969, i8* undef, align 1
+ store i8 %storeval.i.i969, ptr undef, align 1
br label %pl_loopend.i973
pl_loopend.i973: ; preds = %pl_dolane.i970, %pl_loop.i964
exit: ; preds = %inner.latch
%storemerge1.lcssa = phi i32 [ %storemerge1, %inner.latch ]
- store i32 %storemerge1.lcssa, i32* @b, align 4
+ store i32 %storemerge1.lcssa, ptr @b, align 4
ret void
outer.latch: ; preds = %inner.header
br label %inner.header
inner.header:
- %x = load i32, i32* undef, align 4
+ %x = load i32, ptr undef, align 4
br i1 true, label %outer.latch, label %inner.latch
inner.latch:
%inc6 = add nsw i32 %x, 1
- store i32 %inc6, i32* undef, align 4
+ store i32 %inc6, ptr undef, align 4
br i1 false, label %inner.header, label %exit
exit:
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc"
-declare void @fn1(i8*)
+declare void @fn1(ptr)
-declare i1 @fn2(i8*, i8*)
+declare i1 @fn2(ptr, ptr)
-define void @fn4() personality i32 (...)* @__CxxFrameHandler3 {
+define void @fn4() personality ptr @__CxxFrameHandler3 {
entry:
br label %for.body
for.body: ; preds = %for.inc, %entry
%i.05 = phi i8 [ 0, %entry ], [ %inc, %for.inc ]
- store i8 undef, i8* undef, align 4
- invoke void @fn1(i8* undef)
+ store i8 undef, ptr undef, align 4
+ invoke void @fn1(ptr undef)
to label %call.i.noexc unwind label %ehcleanup
call.i.noexc: ; preds = %for.body
- %call1.i2 = invoke i1 @fn2(i8* undef, i8* undef)
+ %call1.i2 = invoke i1 @fn2(ptr undef, ptr undef)
to label %call1.i.noexc unwind label %ehcleanup
call1.i.noexc: ; preds = %call.i.noexc
br i1 undef, label %if.then.i, label %if.end4.i
if.then.i: ; preds = %call1.i.noexc
- %tmp1 = load i8, i8* undef, align 4
+ %tmp1 = load i8, ptr undef, align 4
%tobool.i = icmp eq i8 undef, undef
br i1 undef, label %if.end4.i, label %if.then2.i
if.then2.i: ; preds = %if.then.i
- %call3.i3 = invoke i1 @fn2(i8* undef, i8* null)
+ %call3.i3 = invoke i1 @fn2(ptr undef, ptr null)
to label %call3.i.noexc unwind label %ehcleanup
call3.i.noexc: ; preds = %if.then2.i
br label %if.end4.i
if.end4.i: ; preds = %call3.i.noexc, %if.then.i, %call1.i.noexc
- %tmp2 = load i8, i8* undef, align 4
+ %tmp2 = load i8, ptr undef, align 4
br label %if.then6.i
if.then6.i: ; preds = %if.end4.i
- %call7.i4 = invoke i1 @fn2(i8* undef, i8* null)
+ %call7.i4 = invoke i1 @fn2(ptr undef, ptr null)
to label %call7.i.noexc unwind label %ehcleanup
call7.i.noexc: ; preds = %if.then6.i
br label %fn3
fn3: ; preds = %call7.i.noexc
- %tmp3 = load i8, i8* undef, align 4
+ %tmp3 = load i8, ptr undef, align 4
%inc.i = add nsw i8 undef, undef
- store i8 undef, i8* undef, align 4
+ store i8 undef, ptr undef, align 4
br label %for.inc
for.inc: ; preds = %fn3
; CHECK-NEXT: [[C3:%.*]] = call i1 @unknown(i32 [[D_0]])
; CHECK-NEXT: br i1 [[C3]], label [[LATCH1]], label [[PH2]]
; CHECK: latch1:
-; CHECK-NEXT: [[TMP0]] = load i32, i32* @b, align 4
+; CHECK-NEXT: [[TMP0]] = load i32, ptr @b, align 4
; CHECK-NEXT: br label [[H1]]
; CHECK: exit.loopexit:
; CHECK-NEXT: [[D_0_LCSSA_PH:%.*]] = phi i32 [ [[D_0]], [[H3]] ]
br i1 %c3, label %latch1, label %ph2
latch1: ; preds = %exit2
- %1 = load i32, i32* @b, align 4
+ %1 = load i32, ptr @b, align 4
br label %h1
exit:
; PEEL2: for.body.peel.begin:
; PEEL2-NEXT: br label [[FOR_BODY_PEEL:%.*]]
; PEEL2: for.body.peel:
-; PEEL2-NEXT: [[ARRAYIDX_PEEL:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 0
+; PEEL2-NEXT: [[ARRAYIDX_PEEL:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 0
; PEEL2-NEXT: [[TMP0:%.*]] = trunc i64 0 to i32
-; PEEL2-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX_PEEL]], align 4
+; PEEL2-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX_PEEL]], align 4
; PEEL2-NEXT: [[INDVARS_IV_NEXT_PEEL:%.*]] = add nuw nsw i64 0, 1
; PEEL2-NEXT: [[EXITCOND_PEEL:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL]], 8
; PEEL2-NEXT: br i1 [[EXITCOND_PEEL]], label [[FOR_BODY_PEEL_NEXT:%.*]], label [[FOR_EXIT:%.*]]
; PEEL2: for.body.peel.next:
; PEEL2-NEXT: br label [[FOR_BODY_PEEL2:%.*]]
; PEEL2: for.body.peel2:
-; PEEL2-NEXT: [[ARRAYIDX_PEEL3:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL]]
+; PEEL2-NEXT: [[ARRAYIDX_PEEL3:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL]]
; PEEL2-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL]] to i32
-; PEEL2-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX_PEEL3]], align 4
+; PEEL2-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX_PEEL3]], align 4
; PEEL2-NEXT: [[INDVARS_IV_NEXT_PEEL4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL]], 1
; PEEL2-NEXT: [[EXITCOND_PEEL5:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL4]], 8
; PEEL2-NEXT: br i1 [[EXITCOND_PEEL5]], label [[FOR_BODY_PEEL_NEXT1:%.*]], label [[FOR_EXIT]]
; PEEL2-NEXT: br label [[FOR_BODY:%.*]]
; PEEL2: for.body:
; PEEL2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_PEEL4]], [[ENTRY_PEEL_NEWPH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; PEEL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
+; PEEL2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
; PEEL2-NEXT: [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; PEEL2-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX]], align 4
+; PEEL2-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX]], align 4
; PEEL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; PEEL2-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 8
; PEEL2-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; PEEL8: for.body.peel.begin:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL:%.*]]
; PEEL8: for.body.peel:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 0
+; PEEL8-NEXT: [[ARRAYIDX_PEEL:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 0
; PEEL8-NEXT: [[TMP0:%.*]] = trunc i64 0 to i32
-; PEEL8-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX_PEEL]], align 4
+; PEEL8-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX_PEEL]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL:%.*]] = add nuw nsw i64 0, 1
; PEEL8-NEXT: [[EXITCOND_PEEL:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL]], label [[FOR_BODY_PEEL_NEXT:%.*]], label [[FOR_EXIT:%.*]]
; PEEL8: for.body.peel.next:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL2:%.*]]
; PEEL8: for.body.peel2:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL3:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL3:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL]]
; PEEL8-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL]] to i32
-; PEEL8-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX_PEEL3]], align 4
+; PEEL8-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX_PEEL3]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL5:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL4]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL5]], label [[FOR_BODY_PEEL_NEXT1:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next1:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL7:%.*]]
; PEEL8: for.body.peel7:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL8:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL4]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL8:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL4]]
; PEEL8-NEXT: [[TMP2:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL4]] to i32
-; PEEL8-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX_PEEL8]], align 4
+; PEEL8-NEXT: store i32 [[TMP2]], ptr [[ARRAYIDX_PEEL8]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL9:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL4]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL10:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL9]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL10]], label [[FOR_BODY_PEEL_NEXT6:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next6:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL12:%.*]]
; PEEL8: for.body.peel12:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL13:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL9]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL13:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL9]]
; PEEL8-NEXT: [[TMP3:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL9]] to i32
-; PEEL8-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX_PEEL13]], align 4
+; PEEL8-NEXT: store i32 [[TMP3]], ptr [[ARRAYIDX_PEEL13]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL14:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL9]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL15:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL14]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL15]], label [[FOR_BODY_PEEL_NEXT11:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next11:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL17:%.*]]
; PEEL8: for.body.peel17:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL18:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL14]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL18:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL14]]
; PEEL8-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL14]] to i32
-; PEEL8-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX_PEEL18]], align 4
+; PEEL8-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX_PEEL18]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL19:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL14]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL20:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL19]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL20]], label [[FOR_BODY_PEEL_NEXT16:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next16:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL22:%.*]]
; PEEL8: for.body.peel22:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL23:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL19]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL23:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL19]]
; PEEL8-NEXT: [[TMP5:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL19]] to i32
-; PEEL8-NEXT: store i32 [[TMP5]], i32* [[ARRAYIDX_PEEL23]], align 4
+; PEEL8-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX_PEEL23]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL24:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL19]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL25:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL24]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL25]], label [[FOR_BODY_PEEL_NEXT21:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next21:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL27:%.*]]
; PEEL8: for.body.peel27:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL28:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL24]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL28:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL24]]
; PEEL8-NEXT: [[TMP6:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL24]] to i32
-; PEEL8-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX_PEEL28]], align 4
+; PEEL8-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX_PEEL28]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL29:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL24]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL30:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL29]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL30]], label [[FOR_BODY_PEEL_NEXT26:%.*]], label [[FOR_EXIT]]
; PEEL8: for.body.peel.next26:
; PEEL8-NEXT: br label [[FOR_BODY_PEEL32:%.*]]
; PEEL8: for.body.peel32:
-; PEEL8-NEXT: [[ARRAYIDX_PEEL33:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL29]]
+; PEEL8-NEXT: [[ARRAYIDX_PEEL33:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV_NEXT_PEEL29]]
; PEEL8-NEXT: [[TMP7:%.*]] = trunc i64 [[INDVARS_IV_NEXT_PEEL29]] to i32
-; PEEL8-NEXT: store i32 [[TMP7]], i32* [[ARRAYIDX_PEEL33]], align 4
+; PEEL8-NEXT: store i32 [[TMP7]], ptr [[ARRAYIDX_PEEL33]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT_PEEL34:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_PEEL29]], 1
; PEEL8-NEXT: [[EXITCOND_PEEL35:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT_PEEL34]], 8
; PEEL8-NEXT: br i1 [[EXITCOND_PEEL35]], label [[FOR_BODY_PEEL_NEXT31:%.*]], label [[FOR_EXIT]]
; PEEL8-NEXT: br label [[FOR_BODY:%.*]]
; PEEL8: for.body:
; PEEL8-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_PEEL34]], [[ENTRY_PEEL_NEWPH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; PEEL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
+; PEEL8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
; PEEL8-NEXT: [[TMP8:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; PEEL8-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
+; PEEL8-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4
; PEEL8-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; PEEL8-NEXT: br i1 true, label [[FOR_BODY]], label [[FOR_EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; PEEL8: for.exit.loopexit:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [8 x i32], ptr @a, i64 0, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 8
br i1 %exitcond, label %for.body, label %for.exit
; CHECK-NEXT: br label [[L2_HEADER]]
; CHECK: L3_body:
; CHECK-NEXT: [[Y1_LCSSA:%.*]] = phi i64 [ [[Y1]], [[L3_HEADER]] ]
-; CHECK-NEXT: store i64 [[Y1_LCSSA]], i64* undef, align 8
+; CHECK-NEXT: store i64 [[Y1_LCSSA]], ptr undef, align 8
; CHECK-NEXT: br i1 false, label [[L3_LATCH:%.*]], label [[L1_LATCH:%.*]]
; CHECK: L3_latch:
; CHECK-NEXT: ret void
br label %L2_header
L3_body:
- store i64 %y1, i64* undef
+ store i64 %y1, ptr undef
br i1 false, label %L3_latch, label %L1_latch
L3_latch:
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L1_HEADER:%.*]]
; CHECK: L1_header:
-; CHECK-NEXT: [[A:%.*]] = phi i8* [ [[B:%.*]], [[L1_LATCH:%.*]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[A:%.*]] = phi ptr [ [[B:%.*]], [[L1_LATCH:%.*]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: br i1 undef, label [[L2_HEADER_PREHEADER:%.*]], label [[L1_LATCH]]
; CHECK: L2_header.preheader:
; CHECK-NEXT: br label [[L2_HEADER:%.*]]
; CHECK: L2_header:
; CHECK-NEXT: br i1 false, label [[L2_LATCH:%.*]], label [[L1_LATCH_LOOPEXIT:%.*]]
; CHECK: L2_latch:
-; CHECK-NEXT: [[A_LCSSA:%.*]] = phi i8* [ [[A]], [[L2_HEADER]] ]
+; CHECK-NEXT: [[A_LCSSA:%.*]] = phi ptr [ [[A]], [[L2_HEADER]] ]
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: L1_latch.loopexit:
; CHECK-NEXT: br label [[L1_LATCH]]
; CHECK: L1_latch:
-; CHECK-NEXT: [[B]] = phi i8* [ undef, [[L1_HEADER]] ], [ null, [[L1_LATCH_LOOPEXIT]] ]
+; CHECK-NEXT: [[B]] = phi ptr [ undef, [[L1_HEADER]] ], [ null, [[L1_LATCH_LOOPEXIT]] ]
; CHECK-NEXT: br label [[L1_HEADER]]
; CHECK: Exit:
-; CHECK-NEXT: [[A_LCSSA2:%.*]] = phi i8* [ [[A_LCSSA]], [[L2_LATCH]] ]
+; CHECK-NEXT: [[A_LCSSA2:%.*]] = phi ptr [ [[A_LCSSA]], [[L2_LATCH]] ]
; CHECK-NEXT: ret void
;
entry:
br label %L1_header
L1_header:
- %a = phi i8* [ %b, %L1_latch ], [ null, %entry ]
+ %a = phi ptr [ %b, %L1_latch ], [ null, %entry ]
br i1 undef, label %L2_header, label %L1_latch
L2_header:
br i1 true, label %L2_exit, label %L2_header
L1_latch:
- %b = phi i8* [ undef, %L1_header ], [ null, %L2_header ]
+ %b = phi ptr [ undef, %L1_header ], [ null, %L2_header ]
br label %L1_header
L2_exit:
- %a_lcssa1 = phi i8* [ %a, %L2_latch ]
+ %a_lcssa1 = phi ptr [ %a, %L2_latch ]
br label %Exit
Exit:
- %a_lcssa2 = phi i8* [ %a_lcssa1, %L2_exit ]
+ %a_lcssa2 = phi ptr [ %a_lcssa1, %L2_exit ]
ret void
}
; CHECK-NEXT: br label [[INNER2_INDIRECT_EXIT:%.*]]
; CHECK: inner2_indirect_exit:
; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[B:%.*]], [[INNER2_LATCH:%.*]] ], [ undef, [[INNER1]] ]
-; CHECK-NEXT: indirectbr i8* undef, [label [[INNER2_LATCH]], label [[INNER3:%.*]], label %outer_latch]
+; CHECK-NEXT: indirectbr ptr undef, [label [[INNER2_LATCH]], label [[INNER3:%.*]], label %outer_latch]
; CHECK: inner2_latch:
-; CHECK-NEXT: [[B]] = load i32, i32* undef, align 8
+; CHECK-NEXT: [[B]] = load i32, ptr undef, align 8
; CHECK-NEXT: br label [[INNER2_INDIRECT_EXIT]]
; CHECK: inner3:
; CHECK-NEXT: [[A_LCSSA:%.*]] = phi i32 [ [[A_LCSSA]], [[INNER3]] ], [ [[A]], [[INNER2_INDIRECT_EXIT]] ]
inner2_indirect_exit:
%a = phi i32 [ %b, %inner2_latch ], [ undef, %inner2_indirect_exit.preheader ]
- indirectbr i8* undef, [label %inner2_latch, label %inner3, label %outer_latch]
+ indirectbr ptr undef, [label %inner2_latch, label %inner3, label %outer_latch]
inner2_latch:
- %b = load i32, i32* undef, align 8
+ %b = load i32, ptr undef, align 8
br label %inner2_indirect_exit
inner3:
source_filename = "reduced.ll"
-%"class.std::__Cr::basic_ostream" = type { i32 (...)**, %"class.std::__Cr::basic_ios" }
-%"class.std::__Cr::basic_ios" = type { %"class.std::__Cr::ios_base", %"class.std::__Cr::basic_ostream"*, i32 }
-%"class.std::__Cr::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, i8*, i8*, void (i32, %"class.std::__Cr::ios_base"*, i32)**, i32*, i32, i32, i32*, i32, i32, i8**, i32, i32 }
-%"class.v8::internal::wasm::StructType" = type { i32, i32*, %"class.v8::internal::wasm::ValueType"*, i8* }
+%"class.std::__Cr::basic_ostream" = type { ptr, %"class.std::__Cr::basic_ios" }
+%"class.std::__Cr::basic_ios" = type { %"class.std::__Cr::ios_base", ptr, i32 }
+%"class.std::__Cr::ios_base" = type { ptr, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, i32, i32, ptr, i32, i32, ptr, i32, i32 }
+%"class.v8::internal::wasm::StructType" = type { i32, ptr, ptr, ptr }
%"class.v8::internal::wasm::ValueType" = type { i32 }
$_ZNK2v88internal4wasm10StructType12field_offsetEj = comdat any
-declare hidden %"class.std::__Cr::basic_ostream"* @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc() local_unnamed_addr
+declare hidden ptr @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc() local_unnamed_addr
define hidden void @_ZN2v88internal10WasmStruct15WasmStructPrintERNSt4__Cr13basic_ostreamIcNS2_11char_traitsIcEEEE() local_unnamed_addr align 2 {
; CHECK-LABEL: @_ZN2v88internal10WasmStruct15WasmStructPrintERNSt4__Cr13basic_ostreamIcNS2_11char_traitsIcEEEE(
; CHECK-NEXT: i8 10, label [[SW_BB31]]
; CHECK-NEXT: ]
; CHECK: sw.bb33.peel:
-; CHECK-NEXT: [[CALL34_PEEL:%.*]] = tail call %"class.std::__Cr::basic_ostream"* @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
+; CHECK-NEXT: [[CALL34_PEEL:%.*]] = tail call ptr @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
; CHECK-NEXT: br label [[FOR_INC_PEEL]]
; CHECK: for.inc.peel:
; CHECK-NEXT: [[CALL6_PEEL:%.*]] = tail call i32 @_ZNK2v88internal4wasm10StructType11field_countEv()
; CHECK-NEXT: tail call void @_ZN2v84baseL18ReadUnalignedValueINS_8internal6ObjectEEET_j()
; CHECK-NEXT: unreachable
; CHECK: sw.bb33:
-; CHECK-NEXT: [[CALL34:%.*]] = tail call %"class.std::__Cr::basic_ostream"* @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
+; CHECK-NEXT: [[CALL34:%.*]] = tail call ptr @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[CALL6:%.*]] = tail call i32 @_ZNK2v88internal4wasm10StructType11field_countEv()
unreachable
sw.bb33: ; preds = %for.body
- %call34 = tail call %"class.std::__Cr::basic_ostream"* @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
+ %call34 = tail call ptr @_ZNSt4__CrlsINS_11char_traitsIcEEEERNS_13basic_ostreamIcT_EES6_PKc()
br label %for.inc
for.inc: ; preds = %for.body, %sw.bb33
declare hidden i32 @_ZNK2v88internal4wasm10StructType11field_countEv() local_unnamed_addr align 2
-define linkonce_odr hidden i32 @_ZNK2v88internal4wasm10StructType12field_offsetEj(%"class.v8::internal::wasm::StructType"* %this, i32 %index) local_unnamed_addr comdat align 2 {
+define linkonce_odr hidden i32 @_ZNK2v88internal4wasm10StructType12field_offsetEj(ptr %this, i32 %index) local_unnamed_addr comdat align 2 {
; CHECK-LABEL: @_ZNK2v88internal4wasm10StructType12field_offsetEj(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INDEX:%.*]], 0
; Basic test is fully unrolled and we revisit the post-unroll new sibling
; loops, including the ones that used to be child loops.
-define void @full_unroll(i1* %ptr) {
+define void @full_unroll(ptr %ptr) {
; CHECK-LABEL: OptimizationRemarkEmitterAnalysis on full_unroll
; CHECK-NOT: LoopFullUnrollPass
br label %l0
l0:
- %cond.0 = load volatile i1, i1* %ptr
+ %cond.0 = load volatile i1, ptr %ptr
br i1 %cond.0, label %l0.0.ph, label %exit
l0.0.ph:
br label %l0.0.0
l0.0.0:
- %cond.0.0.0 = load volatile i1, i1* %ptr
+ %cond.0.0.0 = load volatile i1, ptr %ptr
br i1 %cond.0.0.0, label %l0.0.0, label %l0.0.1.ph
; CHECK: LoopFullUnrollPass on l0.0.0
; CHECK-NOT: LoopFullUnrollPass
br label %l0.0.1
l0.0.1:
- %cond.0.0.1 = load volatile i1, i1* %ptr
+ %cond.0.0.1 = load volatile i1, ptr %ptr
br i1 %cond.0.0.1, label %l0.0.1, label %l0.0.latch
; CHECK: LoopFullUnrollPass on l0.0.1
; CHECK-NOT: LoopFullUnrollPass
; Now we test forced runtime partial unrolling with metadata. Here we end up
; duplicating child loops without changing their structure and so they aren't by
; default visited, but will be visited with a special parameter.
-define void @partial_unroll(i32 %count, i1* %ptr) {
+define void @partial_unroll(i32 %count, ptr %ptr) {
; CHECK-LABEL: OptimizationRemarkEmitterAnalysis on partial_unroll
; CHECK-NOT: LoopFullUnrollPass
br label %l0
l0:
- %cond.0 = load volatile i1, i1* %ptr
+ %cond.0 = load volatile i1, ptr %ptr
br i1 %cond.0, label %l0.0.ph, label %exit
l0.0.ph:
br label %l0.0.0
l0.0.0:
- %cond.0.0.0 = load volatile i1, i1* %ptr
+ %cond.0.0.0 = load volatile i1, ptr %ptr
br i1 %cond.0.0.0, label %l0.0.0, label %l0.0.1.ph
; CHECK: LoopFullUnrollPass on l0.0.0
; CHECK-NOT: LoopFullUnrollPass
br label %l0.0.1
l0.0.1:
- %cond.0.0.1 = load volatile i1, i1* %ptr
+ %cond.0.0.1 = load volatile i1, ptr %ptr
br i1 %cond.0.0.1, label %l0.0.1, label %l0.0.latch
; CHECK: LoopFullUnrollPass on l0.0.1
; CHECK-NOT: LoopFullUnrollPass
; CHECK: remark: {{.*}}: unrolled loop by a factor of 2 with run-time trip count
; CHECK: @widget
; CHECK: ret void
-define void @widget(double* %arg, double* %arg1, double* %p, i64* %q1, i64* %q2, i1 %c) local_unnamed_addr {
+define void @widget(ptr %arg, ptr %arg1, ptr %p, ptr %q1, ptr %q2, i1 %c) local_unnamed_addr {
entry:
br label %header.outer
header.outer: ; preds = %latch.outer, %entry
- %tmp = phi double* [ %tmp8, %latch.outer ], [ %arg, %entry ]
+ %tmp = phi ptr [ %tmp8, %latch.outer ], [ %arg, %entry ]
br label %header.inner
header.inner: ; preds = %latch.inner, %header.outer
- %tmp5 = load i64, i64* %q1, align 8
- %tmp6 = icmp eq double* %p, %arg
+ %tmp5 = load i64, ptr %q1, align 8
+ %tmp6 = icmp eq ptr %p, %arg
br i1 %c, label %exiting.inner, label %latch.outer
exiting.inner: ; preds = %latch.inner, %header.outer
br i1 %c, label %latch.inner, label %latch.outer
latch.inner: ; preds = %header.inner
- store i64 %tmp5, i64* %q2, align 8
+ store i64 %tmp5, ptr %q2, align 8
br label %header.inner
latch.outer: ; preds = %header.inner
- store double 0.0, double* %p, align 8
- %tmp8 = getelementptr inbounds double, double* %tmp, i64 1
- %tmp9 = icmp eq double* %tmp8, %arg1
+ store double 0.0, ptr %p, align 8
+ %tmp8 = getelementptr inbounds double, ptr %tmp, i64 1
+ %tmp9 = icmp eq ptr %tmp8, %arg1
br i1 %tmp9, label %exit, label %header.outer
exit: ; preds = %latch.outer
; RUN: opt < %s -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-other-exit-predictable=true -verify-loop-lcssa -verify-dom-info -verify-loop-info -S | FileCheck %s --check-prefix=ENABLED
; RUN: opt < %s -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-other-exit-predictable=false -verify-loop-lcssa -verify-dom-info -verify-loop-info -S | FileCheck %s --check-prefix=DISABLED
-define i32 @test(i32* nocapture %a, i64 %n) {
+define i32 @test(ptr nocapture %a, i64 %n) {
; ENABLED-LABEL: @test(
; ENABLED-NEXT: entry:
; ENABLED-NEXT: [[TMP0:%.*]] = freeze i64 [[N:%.*]]
; ENABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP]], label [[FOR_EXIT2_LOOPEXIT:%.*]], label [[FOR_BODY:%.*]]
; ENABLED: for.body:
-; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; ENABLED-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ENABLED-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ENABLED-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; ENABLED-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i64 [[NITER]], 1
; ENABLED-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_1]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_1:%.*]]
; ENABLED: for.body.1:
-; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; ENABLED-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; ENABLED-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; ENABLED-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; ENABLED-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i64 [[NITER_NEXT]], 1
; ENABLED-NEXT: [[CMP_2:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_2]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_2:%.*]]
; ENABLED: for.body.2:
-; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; ENABLED-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; ENABLED-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; ENABLED-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; ENABLED-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i64 [[NITER_NEXT_1]], 1
; ENABLED-NEXT: [[CMP_3:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_3]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_3:%.*]]
; ENABLED: for.body.3:
-; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; ENABLED-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; ENABLED-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; ENABLED-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; ENABLED-NEXT: [[NITER_NEXT_3:%.*]] = add nuw nsw i64 [[NITER_NEXT_2]], 1
; ENABLED-NEXT: [[CMP_4:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_4]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_4:%.*]]
; ENABLED: for.body.4:
-; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_3]]
-; ENABLED-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_3]]
+; ENABLED-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; ENABLED-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; ENABLED-NEXT: [[NITER_NEXT_4:%.*]] = add nuw nsw i64 [[NITER_NEXT_3]], 1
; ENABLED-NEXT: [[CMP_5:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_5]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_5:%.*]]
; ENABLED: for.body.5:
-; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_4]]
-; ENABLED-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_4]]
+; ENABLED-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; ENABLED-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; ENABLED-NEXT: [[NITER_NEXT_5:%.*]] = add nuw nsw i64 [[NITER_NEXT_4]], 1
; ENABLED-NEXT: [[CMP_6:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_6]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_6:%.*]]
; ENABLED: for.body.6:
-; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_5]]
-; ENABLED-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_5]]
+; ENABLED-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; ENABLED-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; ENABLED-NEXT: [[NITER_NEXT_6:%.*]] = add nuw nsw i64 [[NITER_NEXT_5]], 1
; ENABLED-NEXT: [[CMP_7:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_7]], label [[FOR_EXIT2_LOOPEXIT]], label [[FOR_BODY_7]]
; ENABLED: for.body.7:
-; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_6]]
-; ENABLED-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_6]]
+; ENABLED-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; ENABLED-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV_NEXT_6]], 1
; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER_NEXT_6]], 1
; ENABLED-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_EPIL]], label [[FOR_EXIT2_LOOPEXIT2:%.*]], label [[FOR_BODY_EPIL]]
; ENABLED: for.body.epil:
-; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_EPIL]]
-; ENABLED-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; ENABLED-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; ENABLED-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
; ENABLED-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_EPIL]], [[N]]
; DISABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; DISABLED-NEXT: br i1 [[CMP]], label [[FOR_EXIT2:%.*]], label [[FOR_BODY]]
; DISABLED: for.body:
-; DISABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; DISABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; DISABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; DISABLED-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; DISABLED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; DISABLED-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
br i1 %cmp, label %for.exit2, label %for.body
for.body:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; CHECK: ![[#PROF]] = !{!"branch_weights", i32 1, i32 9999}
; CHECK: ![[#PROF2]] = !{!"branch_weights", i32 3, i32 1}
-define i3 @test(i3* %a, i3 %n) {
+define i3 @test(ptr %a, i3 %n) {
entry:
%cmp1 = icmp eq i3 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i3 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i3, i3* %a, i64 %indvars.iv
- %0 = load i3, i3* %arrayidx
+ %arrayidx = getelementptr inbounds i3, ptr %a, i64 %indvars.iv
+ %0 = load i3, ptr %arrayidx
%add = add nsw i3 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i3
; There are 2 values passed to the exit blocks that are calculated at every iteration.
; %sum.02 and %add. Both of these are incoming values for phi from every exiting
; unrolled block.
-define i32 @test2(i32* nocapture %a, i64 %n) {
+define i32 @test2(ptr nocapture %a, i64 %n) {
; EPILOG-LABEL: @test2(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %for.body
; EPILOG: for.body:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %for.body.1
; EPILOG: for.body.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-NEXT: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %for.body.2
; EPILOG: for.body.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %5 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %5 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %5, %add.1
; EPILOG-NEXT: %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv.next.1, 1
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %for.body.3
; EPILOG: for.body.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %6 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %6 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %6, %add.2
; EPILOG-NEXT: %indvars.iv.next.3 = add nuw nsw i64 %indvars.iv.next.2, 1
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %for.body.4
; EPILOG: for.body.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %7 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %7 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %7, %add.3
; EPILOG-NEXT: %indvars.iv.next.4 = add nuw nsw i64 %indvars.iv.next.3, 1
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %for.body.5
; EPILOG: for.body.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %8 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %8 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %8, %add.4
; EPILOG-NEXT: %indvars.iv.next.5 = add nuw nsw i64 %indvars.iv.next.4, 1
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %for.body.6
; EPILOG: for.body.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %9 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %9 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %9, %add.5
; EPILOG-NEXT: %indvars.iv.next.6 = add nuw nsw i64 %indvars.iv.next.5, 1
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %for.body.7
; EPILOG: for.body.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %10 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %10 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %10, %add.6
; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; EPILOG-NEXT: %niter.next.7 = add i64 %niter.next.6, 1
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit2, label %for.body.epil
; EPILOG: for.body.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %11 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil
; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1
; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %for.body
; EPILOG-BLOCK: for.body:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-BLOCK-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %for.body.1
; EPILOG-BLOCK: for.body.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter.next, 1
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %for.body.epil
; EPILOG-BLOCK: for.body.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr
; EPILOG-BLOCK-NEXT: br label %for.end
; EPILOG-BLOCK: for.end:
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %for.exit2.loopexit1, label %for.body.prol
; PROLOG: for.body.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %2 = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %2 = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %2, %sum.02.prol
; PROLOG-NEXT: %indvars.iv.next.prol = add i64 %indvars.iv.prol, 1
; PROLOG-NEXT: %exitcond.prol = icmp eq i64 %indvars.iv.next.prol, %n
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %for.body
; PROLOG: for.body:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %for.body.1
; PROLOG: for.body.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.2
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %for.body.2
; PROLOG: for.body.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %6 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %6 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %6, %add.1
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.3
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %for.body.3
; PROLOG: for.body.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %7 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %7 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %7, %add.2
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.4
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %for.body.4
; PROLOG: for.body.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %8 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %8 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %8, %add.3
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.5
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %for.body.5
; PROLOG: for.body.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %9 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %9 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %9, %add.4
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.6
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %for.body.6
; PROLOG: for.body.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %10 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %10 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %10, %add.5
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.7
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %for.body.7
; PROLOG: for.body.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %11 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %11 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %11, %add.6
; PROLOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; PROLOG-NEXT: %exitcond.7 = icmp eq i64 %indvars.iv.next.7, %n
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %for.exit2, label %for.body.prol
; PROLOG-BLOCK: for.body.prol:
-; PROLOG-BLOCK-NEXT: %2 = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %2 = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: br label %header.prol.loopexit
; PROLOG-BLOCK: header.prol.loopexit:
; PROLOG-BLOCK-NEXT: %sum.0.lcssa.unr = phi i32 [ undef, %entry ], [ %2, %for.body.prol ]
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %for.body
; PROLOG-BLOCK: for.body:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %for.body.1
; PROLOG-BLOCK: for.body.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-BLOCK-NEXT: %exitcond.1 = icmp eq i64 %indvars.iv.next.1, %n
br i1 %cmp, label %for.exit2, label %for.body
for.body:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; FIXME: Support multiple exiting blocks to the same latch exit block.
; Three exiting blocks where header and latch exit to same LatchExit.
-define i32 @hdr_latch_same_exit(i32* nocapture %a, i64 %n, i1 %cond) {
+define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) {
; EPILOG-LABEL: @hdr_latch_same_exit(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; EPILOG: latch:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; EPILOG: latch.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-NEXT: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %latch.2
; EPILOG: latch.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %5 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %5 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %5, %add.1
; EPILOG-NEXT: %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv.next.1, 1
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %latch.3
; EPILOG: latch.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %6 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %6 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %6, %add.2
; EPILOG-NEXT: %indvars.iv.next.3 = add nuw nsw i64 %indvars.iv.next.2, 1
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %latch.4
; EPILOG: latch.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %7 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %7 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %7, %add.3
; EPILOG-NEXT: %indvars.iv.next.4 = add nuw nsw i64 %indvars.iv.next.3, 1
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %latch.5
; EPILOG: latch.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %8 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %8 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %8, %add.4
; EPILOG-NEXT: %indvars.iv.next.5 = add nuw nsw i64 %indvars.iv.next.4, 1
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %latch.6
; EPILOG: latch.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %9 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %9 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %9, %add.5
; EPILOG-NEXT: %indvars.iv.next.6 = add nuw nsw i64 %indvars.iv.next.5, 1
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %latch.7
; EPILOG: latch.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %10 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %10 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %10, %add.6
; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; EPILOG-NEXT: %niter.next.7 = add i64 %niter.next.6, 1
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit4, label %latch.epil
; EPILOG: latch.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %11 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil
; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1
; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; EPILOG-BLOCK: latch:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-BLOCK-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; EPILOG-BLOCK: latch.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter.next, 1
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %latch.epil
; EPILOG-BLOCK: latch.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr
; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa
; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit:
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %for.exit2.loopexit3, label %latch.prol
; PROLOG: latch.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %2 = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %2 = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %2, %sum.02.prol
; PROLOG-NEXT: %indvars.iv.next.prol = add i64 %indvars.iv.prol, 1
; PROLOG-NEXT: %exitcond.prol = icmp eq i64 %indvars.iv.next.prol, %n
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; PROLOG: latch:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.1
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; PROLOG: latch.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.2
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %latch.2
; PROLOG: latch.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %6 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %6 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %6, %add.1
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.3
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %latch.3
; PROLOG: latch.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %7 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %7 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %7, %add.2
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.4
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %latch.4
; PROLOG: latch.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %8 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %8 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %8, %add.3
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.5
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %latch.5
; PROLOG: latch.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %9 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %9 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %9, %add.4
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.6
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %latch.6
; PROLOG: latch.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %10 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %10 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %10, %add.5
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.7
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %latch.7
; PROLOG: latch.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %11 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %11 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %11, %add.6
; PROLOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; PROLOG-NEXT: %exitcond.7 = icmp eq i64 %indvars.iv.next.7, %n
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %for.exit2, label %latch.prol
; PROLOG-BLOCK: latch.prol:
-; PROLOG-BLOCK-NEXT: %2 = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %2 = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: br label %header.prol.loopexit
; PROLOG-BLOCK: header.prol.loopexit:
; PROLOG-BLOCK-NEXT: %result.unr = phi i32 [ undef, %entry ], [ %2, %latch.prol ]
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; PROLOG-BLOCK: latch:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 %cond, label %latchExit.unr-lcssa.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; PROLOG-BLOCK: latch.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-BLOCK-NEXT: %exitcond.1 = icmp eq i64 %indvars.iv.next.1, %n
br i1 %cmp, label %for.exit2, label %latch
latch: ; preds = %latch, %entry
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; Two exiting blocks branching to the latch exit, where the exiting blocks are
; the latch and a non-header block.
; FIXME: We should unroll this loop.
-define i32 @otherblock_latch_same_exit(i32* nocapture %a, i64 %n, i1 %cond) {
+define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) {
; EPILOG-LABEL: @otherblock_latch_same_exit(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG: latch:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG: latch.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-NEXT: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %latchExit.epilog-lcssa.loopexit, label %latch.2
; EPILOG: latch.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %5 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %5 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %5, %add.1
; EPILOG-NEXT: %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv.next.1, 1
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %latchExit.epilog-lcssa.loopexit, label %latch.3
; EPILOG: latch.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %6 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %6 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %6, %add.2
; EPILOG-NEXT: %indvars.iv.next.3 = add nuw nsw i64 %indvars.iv.next.2, 1
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %latchExit.epilog-lcssa.loopexit, label %latch.4
; EPILOG: latch.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %7 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %7 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %7, %add.3
; EPILOG-NEXT: %indvars.iv.next.4 = add nuw nsw i64 %indvars.iv.next.3, 1
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %latchExit.epilog-lcssa.loopexit, label %latch.5
; EPILOG: latch.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %8 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %8 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %8, %add.4
; EPILOG-NEXT: %indvars.iv.next.5 = add nuw nsw i64 %indvars.iv.next.4, 1
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %latchExit.epilog-lcssa.loopexit, label %latch.6
; EPILOG: latch.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %9 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %9 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %9, %add.5
; EPILOG-NEXT: %indvars.iv.next.6 = add nuw nsw i64 %indvars.iv.next.5, 1
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %latchExit.epilog-lcssa.loopexit, label %latch.7
; EPILOG: latch.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %10 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %10 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %10, %add.6
; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; EPILOG-NEXT: %niter.next.7 = add i64 %niter.next.6, 1
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil
; EPILOG: latch.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %11 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil
; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1
; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG-BLOCK: latch:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-BLOCK-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG-BLOCK: latch.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter.next, 1
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil
; EPILOG-BLOCK: latch.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr
; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa
; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit:
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa.loopexit2, label %latch.prol
; PROLOG: latch.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %2 = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %2 = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %2, %sum.02.prol
; PROLOG-NEXT: %indvars.iv.next.prol = add i64 %indvars.iv.prol, 1
; PROLOG-NEXT: %exitcond.prol = icmp eq i64 %indvars.iv.next.prol, %n
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG: latch:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
; PROLOG: latch.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.2
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %latchExit.unr-lcssa.loopexit, label %latch.2
; PROLOG: latch.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %6 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %6 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %6, %add.1
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.3
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %latchExit.unr-lcssa.loopexit, label %latch.3
; PROLOG: latch.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %7 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %7 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %7, %add.2
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.4
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %latchExit.unr-lcssa.loopexit, label %latch.4
; PROLOG: latch.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %8 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %8 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %8, %add.3
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.5
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %latchExit.unr-lcssa.loopexit, label %latch.5
; PROLOG: latch.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %9 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %9 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %9, %add.4
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.6
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %latchExit.unr-lcssa.loopexit, label %latch.6
; PROLOG: latch.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %10 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %10 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %10, %add.5
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.7
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %latchExit.unr-lcssa.loopexit, label %latch.7
; PROLOG: latch.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %11 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %11 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %11, %add.6
; PROLOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; PROLOG-NEXT: %exitcond.7 = icmp eq i64 %indvars.iv.next.7, %n
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa, label %latch.prol
; PROLOG-BLOCK: latch.prol:
-; PROLOG-BLOCK-NEXT: %2 = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %2 = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: br label %header.prol.loopexit
; PROLOG-BLOCK: header.prol.loopexit:
; PROLOG-BLOCK-NEXT: %result.unr = phi i32 [ undef, %entry ], [ %2, %latch.prol ]
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG-BLOCK: latch:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
; PROLOG-BLOCK: latch.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-BLOCK-NEXT: %exitcond.1 = icmp eq i64 %indvars.iv.next.1, %n
br i1 %cmp, label %latchExit, label %latch
latch: ; preds = %latch, %entry
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; non-header
; Same as the above test, except that the incoming value for the latch phi
; comes from the header.
; FIXME: We should be able to runtime unroll.
-define i32 @otherblock_latch_same_exit2(i32* nocapture %a, i64 %n, i1 %cond) {
+define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) {
; EPILOG-LABEL: @otherblock_latch_same_exit2(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG: latch:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG: latch.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-NEXT: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %latchExit.epilog-lcssa.loopexit, label %latch.2
; EPILOG: latch.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %5 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %5 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %5, %add.1
; EPILOG-NEXT: %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv.next.1, 1
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %latchExit.epilog-lcssa.loopexit, label %latch.3
; EPILOG: latch.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %6 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %6 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %6, %add.2
; EPILOG-NEXT: %indvars.iv.next.3 = add nuw nsw i64 %indvars.iv.next.2, 1
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %latchExit.epilog-lcssa.loopexit, label %latch.4
; EPILOG: latch.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %7 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %7 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %7, %add.3
; EPILOG-NEXT: %indvars.iv.next.4 = add nuw nsw i64 %indvars.iv.next.3, 1
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %latchExit.epilog-lcssa.loopexit, label %latch.5
; EPILOG: latch.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %8 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %8 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %8, %add.4
; EPILOG-NEXT: %indvars.iv.next.5 = add nuw nsw i64 %indvars.iv.next.4, 1
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %latchExit.epilog-lcssa.loopexit, label %latch.6
; EPILOG: latch.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %9 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %9 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %9, %add.5
; EPILOG-NEXT: %indvars.iv.next.6 = add nuw nsw i64 %indvars.iv.next.5, 1
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %latchExit.epilog-lcssa.loopexit, label %latch.7
; EPILOG: latch.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %10 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %10 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %10, %add.6
; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; EPILOG-NEXT: %niter.next.7 = add i64 %niter.next.6, 1
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil
; EPILOG: latch.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %11 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil
; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1
; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG-BLOCK: latch:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-BLOCK-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG-BLOCK: latch.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter.next, 1
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil
; EPILOG-BLOCK: latch.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr
; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa
; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit:
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa.loopexit2, label %latch.prol
; PROLOG: latch.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %2 = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %2 = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %2, %sum.02.prol
; PROLOG-NEXT: %indvars.iv.next.prol = add i64 %indvars.iv.prol, 1
; PROLOG-NEXT: %exitcond.prol = icmp eq i64 %indvars.iv.next.prol, %n
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG: latch:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
; PROLOG: latch.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.2
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %latchExit.unr-lcssa.loopexit, label %latch.2
; PROLOG: latch.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %6 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %6 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %6, %add.1
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.3
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %latchExit.unr-lcssa.loopexit, label %latch.3
; PROLOG: latch.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %7 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %7 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %7, %add.2
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.4
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %latchExit.unr-lcssa.loopexit, label %latch.4
; PROLOG: latch.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %8 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %8 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %8, %add.3
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.5
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %latchExit.unr-lcssa.loopexit, label %latch.5
; PROLOG: latch.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %9 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %9 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %9, %add.4
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.6
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %latchExit.unr-lcssa.loopexit, label %latch.6
; PROLOG: latch.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %10 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %10 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %10, %add.5
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.7
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %latchExit.unr-lcssa.loopexit, label %latch.7
; PROLOG: latch.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %11 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %11 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %11, %add.6
; PROLOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; PROLOG-NEXT: %exitcond.7 = icmp eq i64 %indvars.iv.next.7, %n
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa, label %latch.prol
; PROLOG-BLOCK: latch.prol:
-; PROLOG-BLOCK-NEXT: %2 = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %2 = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: br label %header.prol.loopexit
; PROLOG-BLOCK: header.prol.loopexit:
; PROLOG-BLOCK-NEXT: %result.unr = phi i32 [ undef, %entry ], [ %2, %latch.prol ]
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG-BLOCK: latch:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
; PROLOG-BLOCK: latch.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-BLOCK-NEXT: %exitcond.1 = icmp eq i64 %indvars.iv.next.1, %n
br i1 %cmp, label %latchExit, label %latch
latch: ; preds = %latch, %entry
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; Same as above test except the incoming value for cloned latch Phi is from the
; for.exiting_block.
; FIXME: We should be able to runtime unroll.
-define i32 @otherblock_latch_same_exit3(i32* nocapture %a, i64 %n, i1 %cond) {
+define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) {
; EPILOG-LABEL: @otherblock_latch_same_exit3(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %niter = phi i64 [ 0, %entry.new ], [ %niter.next.7, %latch.7 ]
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block
; EPILOG: for.exiting_block:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; EPILOG: for.exiting_block.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.2
; EPILOG: for.exiting_block.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %5 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %5 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %5, %add.1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %latchExit.epilog-lcssa.loopexit, label %latch.2
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.3
; EPILOG: for.exiting_block.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %6 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %6 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %6, %add.2
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %latchExit.epilog-lcssa.loopexit, label %latch.3
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.4
; EPILOG: for.exiting_block.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %7 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %7 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %7, %add.3
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %latchExit.epilog-lcssa.loopexit, label %latch.4
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.5
; EPILOG: for.exiting_block.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %8 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %8 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %8, %add.4
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %latchExit.epilog-lcssa.loopexit, label %latch.5
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.6
; EPILOG: for.exiting_block.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %9 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %9 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %9, %add.5
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %latchExit.epilog-lcssa.loopexit, label %latch.6
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.7
; EPILOG: for.exiting_block.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %10 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %10 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %10, %add.6
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %latchExit.epilog-lcssa.loopexit, label %latch.7
; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ]
; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit2, label %for.exiting_block.epil
; EPILOG: for.exiting_block.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %11 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil
; EPILOG-BLOCK-NEXT: %niter = phi i64 [ 0, %entry.new ], [ %niter.next.1, %latch.1 ]
; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block
; EPILOG-BLOCK: for.exiting_block:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %3 = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %3 = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %3, %sum.02
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.epilog-lcssa.loopexit, label %latch
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; EPILOG-BLOCK: for.exiting_block.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %4, %add
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.epilog-lcssa.loopexit, label %latch.1
; EPILOG-BLOCK: header.epil:
; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2, label %for.exiting_block.epil
; EPILOG-BLOCK: for.exiting_block.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil
; PROLOG-NEXT: %prol.iter = phi i64 [ 0, %header.prol.preheader ], [ %prol.iter.next, %latch.prol ]
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit1, label %for.exiting_block.prol
; PROLOG: for.exiting_block.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %2 = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %2 = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %2, %sum.02.prol
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa.loopexit2, label %latch.prol
; PROLOG-NEXT: %sum.02 = phi i32 [ %sum.02.unr, %entry.new ], [ %add.7, %latch.7 ]
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block
; PROLOG: for.exiting_block:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG: for.exiting_block.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.2
; PROLOG: for.exiting_block.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %6 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %6 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %6, %add.1
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %latchExit.unr-lcssa.loopexit, label %latch.2
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.3
; PROLOG: for.exiting_block.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %7 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %7 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %7, %add.2
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %latchExit.unr-lcssa.loopexit, label %latch.3
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.4
; PROLOG: for.exiting_block.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %8 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %8 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %8, %add.3
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %latchExit.unr-lcssa.loopexit, label %latch.4
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.5
; PROLOG: for.exiting_block.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %9 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %9 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %9, %add.4
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %latchExit.unr-lcssa.loopexit, label %latch.5
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.6
; PROLOG: for.exiting_block.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %10 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %10 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %10, %add.5
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %latchExit.unr-lcssa.loopexit, label %latch.6
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.7
; PROLOG: for.exiting_block.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %11 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %11 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %11, %add.6
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %latchExit.unr-lcssa.loopexit, label %latch.7
; PROLOG-BLOCK: header.prol:
; PROLOG-BLOCK-NEXT: br i1 %cond, label %for.exit2, label %for.exiting_block.prol
; PROLOG-BLOCK: for.exiting_block.prol:
-; PROLOG-BLOCK-NEXT: %2 = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %2 = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %latchExit.unr-lcssa, label %latch.prol
; PROLOG-BLOCK: latch.prol:
; PROLOG-BLOCK-NEXT: %sum.02 = phi i32 [ %sum.02.unr, %entry.new ], [ %add.1, %latch.1 ]
; PROLOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block
; PROLOG-BLOCK: for.exiting_block:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %4 = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %4 = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %4, %sum.02
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %latchExit.unr-lcssa.loopexit, label %latch
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 %cond, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK: for.exiting_block.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %5 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %5, %add
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %latchExit.unr-lcssa.loopexit, label %latch.1
br i1 %cond, label %for.exit2, label %for.exiting_block
for.exiting_block:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%cmp = icmp eq i64 %n, 42
br i1 %cmp, label %latchExit, label %latch
}
; test when exit blocks have successors.
-define i32 @test6(i32* nocapture %a, i64 %n, i1 %cond, i32 %x) {
+define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) {
; EPILOG-LABEL: @test6(
; EPILOG-NEXT: entry:
; EPILOG-NEXT: %0 = freeze i64 %n
; EPILOG-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; EPILOG: latch:
-; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-NEXT: %load = load i32, i32* %arrayidx, align 4
+; EPILOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-NEXT: %load = load i32, ptr %arrayidx, align 4
; EPILOG-NEXT: %add = add nsw i32 %load, %sum.02
; EPILOG-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; EPILOG: latch.1:
-; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-NEXT: %load.1 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-NEXT: %load.1 = load i32, ptr %arrayidx.1, align 4
; EPILOG-NEXT: %add.1 = add nsw i32 %load.1, %add
; EPILOG-NEXT: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv.next, 1
; EPILOG-NEXT: %niter.next.1 = add nuw nsw i64 %niter.next, 1
; EPILOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %latch.2
; EPILOG: latch.2:
-; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; EPILOG-NEXT: %load.2 = load i32, i32* %arrayidx.2, align 4
+; EPILOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; EPILOG-NEXT: %load.2 = load i32, ptr %arrayidx.2, align 4
; EPILOG-NEXT: %add.2 = add nsw i32 %load.2, %add.1
; EPILOG-NEXT: %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv.next.1, 1
; EPILOG-NEXT: %niter.next.2 = add nuw nsw i64 %niter.next.1, 1
; EPILOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %latch.3
; EPILOG: latch.3:
-; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; EPILOG-NEXT: %load.3 = load i32, i32* %arrayidx.3, align 4
+; EPILOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; EPILOG-NEXT: %load.3 = load i32, ptr %arrayidx.3, align 4
; EPILOG-NEXT: %add.3 = add nsw i32 %load.3, %add.2
; EPILOG-NEXT: %indvars.iv.next.3 = add nuw nsw i64 %indvars.iv.next.2, 1
; EPILOG-NEXT: %niter.next.3 = add nuw nsw i64 %niter.next.2, 1
; EPILOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %latch.4
; EPILOG: latch.4:
-; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; EPILOG-NEXT: %load.4 = load i32, i32* %arrayidx.4, align 4
+; EPILOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; EPILOG-NEXT: %load.4 = load i32, ptr %arrayidx.4, align 4
; EPILOG-NEXT: %add.4 = add nsw i32 %load.4, %add.3
; EPILOG-NEXT: %indvars.iv.next.4 = add nuw nsw i64 %indvars.iv.next.3, 1
; EPILOG-NEXT: %niter.next.4 = add nuw nsw i64 %niter.next.3, 1
; EPILOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %latch.5
; EPILOG: latch.5:
-; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; EPILOG-NEXT: %load.5 = load i32, i32* %arrayidx.5, align 4
+; EPILOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; EPILOG-NEXT: %load.5 = load i32, ptr %arrayidx.5, align 4
; EPILOG-NEXT: %add.5 = add nsw i32 %load.5, %add.4
; EPILOG-NEXT: %indvars.iv.next.5 = add nuw nsw i64 %indvars.iv.next.4, 1
; EPILOG-NEXT: %niter.next.5 = add nuw nsw i64 %niter.next.4, 1
; EPILOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %latch.6
; EPILOG: latch.6:
-; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; EPILOG-NEXT: %load.6 = load i32, i32* %arrayidx.6, align 4
+; EPILOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; EPILOG-NEXT: %load.6 = load i32, ptr %arrayidx.6, align 4
; EPILOG-NEXT: %add.6 = add nsw i32 %load.6, %add.5
; EPILOG-NEXT: %indvars.iv.next.6 = add nuw nsw i64 %indvars.iv.next.5, 1
; EPILOG-NEXT: %niter.next.6 = add nuw nsw i64 %niter.next.5, 1
; EPILOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %latch.7
; EPILOG: latch.7:
-; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; EPILOG-NEXT: %load.7 = load i32, i32* %arrayidx.7, align 4
+; EPILOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; EPILOG-NEXT: %load.7 = load i32, ptr %arrayidx.7, align 4
; EPILOG-NEXT: %add.7 = add nsw i32 %load.7, %add.6
; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; EPILOG-NEXT: %niter.next.7 = add i64 %niter.next.6, 1
; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit2, label %latch.epil
; EPILOG: latch.epil:
-; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.epil
-; EPILOG-NEXT: %load.epil = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil
+; EPILOG-NEXT: %load.epil = load i32, ptr %arrayidx.epil, align 4
; EPILOG-NEXT: %add.epil = add nsw i32 %load.epil, %sum.02.epil
; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1
; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n
; EPILOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; EPILOG-BLOCK: latch:
-; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; EPILOG-BLOCK-NEXT: %load = load i32, i32* %arrayidx, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; EPILOG-BLOCK-NEXT: %load = load i32, ptr %arrayidx, align 4
; EPILOG-BLOCK-NEXT: %add = add nsw i32 %load, %sum.02
; EPILOG-BLOCK-NEXT: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i64 %niter, 1
; EPILOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; EPILOG-BLOCK: latch.1:
-; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; EPILOG-BLOCK-NEXT: %load.1 = load i32, i32* %arrayidx.1, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; EPILOG-BLOCK-NEXT: %load.1 = load i32, ptr %arrayidx.1, align 4
; EPILOG-BLOCK-NEXT: %add.1 = add nsw i32 %load.1, %add
; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter.next, 1
; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42
; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %latch.epil
; EPILOG-BLOCK: latch.epil:
-; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.unr
-; EPILOG-BLOCK-NEXT: %load.epil = load i32, i32* %arrayidx.epil, align 4
+; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
+; EPILOG-BLOCK-NEXT: %load.epil = load i32, ptr %arrayidx.epil, align 4
; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %load.epil, %sum.02.unr
; EPILOG-BLOCK-NEXT: br label %latch_exit
; EPILOG-BLOCK: latch_exit:
; PROLOG-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.prol, label %for.exit2.loopexit1, label %latch.prol
; PROLOG: latch.prol:
-; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.prol
-; PROLOG-NEXT: %load.prol = load i32, i32* %arrayidx.prol, align 4
+; PROLOG-NEXT: %arrayidx.prol = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.prol
+; PROLOG-NEXT: %load.prol = load i32, ptr %arrayidx.prol, align 4
; PROLOG-NEXT: %add.prol = add nsw i32 %load.prol, %sum.02.prol
; PROLOG-NEXT: %indvars.iv.next.prol = add i64 %indvars.iv.prol, 1
; PROLOG-NEXT: %exitcond.prol = icmp eq i64 %indvars.iv.next.prol, %n
; PROLOG-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; PROLOG: latch:
-; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-NEXT: %load = load i32, i32* %arrayidx, align 4
+; PROLOG-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-NEXT: %load = load i32, ptr %arrayidx, align 4
; PROLOG-NEXT: %add = add nsw i32 %load, %sum.02
; PROLOG-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; PROLOG: latch.1:
-; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-NEXT: %load.1 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-NEXT: %load.1 = load i32, ptr %arrayidx.1, align 4
; PROLOG-NEXT: %add.1 = add nsw i32 %load.1, %add
; PROLOG-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.2
; PROLOG-NEXT: %cmp.2 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.2, label %for.exit2.loopexit, label %latch.2
; PROLOG: latch.2:
-; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.1
-; PROLOG-NEXT: %load.2 = load i32, i32* %arrayidx.2, align 4
+; PROLOG-NEXT: %arrayidx.2 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.1
+; PROLOG-NEXT: %load.2 = load i32, ptr %arrayidx.2, align 4
; PROLOG-NEXT: %add.2 = add nsw i32 %load.2, %add.1
; PROLOG-NEXT: %indvars.iv.next.2 = add i64 %indvars.iv.next.1, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.3
; PROLOG-NEXT: %cmp.3 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.3, label %for.exit2.loopexit, label %latch.3
; PROLOG: latch.3:
-; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.2
-; PROLOG-NEXT: %load.3 = load i32, i32* %arrayidx.3, align 4
+; PROLOG-NEXT: %arrayidx.3 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.2
+; PROLOG-NEXT: %load.3 = load i32, ptr %arrayidx.3, align 4
; PROLOG-NEXT: %add.3 = add nsw i32 %load.3, %add.2
; PROLOG-NEXT: %indvars.iv.next.3 = add i64 %indvars.iv.next.2, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.4
; PROLOG-NEXT: %cmp.4 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.4, label %for.exit2.loopexit, label %latch.4
; PROLOG: latch.4:
-; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.3
-; PROLOG-NEXT: %load.4 = load i32, i32* %arrayidx.4, align 4
+; PROLOG-NEXT: %arrayidx.4 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.3
+; PROLOG-NEXT: %load.4 = load i32, ptr %arrayidx.4, align 4
; PROLOG-NEXT: %add.4 = add nsw i32 %load.4, %add.3
; PROLOG-NEXT: %indvars.iv.next.4 = add i64 %indvars.iv.next.3, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.5
; PROLOG-NEXT: %cmp.5 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.5, label %for.exit2.loopexit, label %latch.5
; PROLOG: latch.5:
-; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.4
-; PROLOG-NEXT: %load.5 = load i32, i32* %arrayidx.5, align 4
+; PROLOG-NEXT: %arrayidx.5 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.4
+; PROLOG-NEXT: %load.5 = load i32, ptr %arrayidx.5, align 4
; PROLOG-NEXT: %add.5 = add nsw i32 %load.5, %add.4
; PROLOG-NEXT: %indvars.iv.next.5 = add i64 %indvars.iv.next.4, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.6
; PROLOG-NEXT: %cmp.6 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.6, label %for.exit2.loopexit, label %latch.6
; PROLOG: latch.6:
-; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.5
-; PROLOG-NEXT: %load.6 = load i32, i32* %arrayidx.6, align 4
+; PROLOG-NEXT: %arrayidx.6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.5
+; PROLOG-NEXT: %load.6 = load i32, ptr %arrayidx.6, align 4
; PROLOG-NEXT: %add.6 = add nsw i32 %load.6, %add.5
; PROLOG-NEXT: %indvars.iv.next.6 = add i64 %indvars.iv.next.5, 1
; PROLOG-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.7
; PROLOG-NEXT: %cmp.7 = icmp eq i64 %n, 42
; PROLOG-NEXT: br i1 %cmp.7, label %for.exit2.loopexit, label %latch.7
; PROLOG: latch.7:
-; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next.6
-; PROLOG-NEXT: %load.7 = load i32, i32* %arrayidx.7, align 4
+; PROLOG-NEXT: %arrayidx.7 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next.6
+; PROLOG-NEXT: %load.7 = load i32, ptr %arrayidx.7, align 4
; PROLOG-NEXT: %add.7 = add nsw i32 %load.7, %add.6
; PROLOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv.next.6, 1
; PROLOG-NEXT: %exitcond.7 = icmp eq i64 %indvars.iv.next.7, %n
; PROLOG-BLOCK-NEXT: %cmp.prol = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.prol, label %for.exit2, label %latch.prol
; PROLOG-BLOCK: latch.prol:
-; PROLOG-BLOCK-NEXT: %load.prol = load i32, i32* %a, align 4
+; PROLOG-BLOCK-NEXT: %load.prol = load i32, ptr %a, align 4
; PROLOG-BLOCK-NEXT: br label %header.prol.loopexit
; PROLOG-BLOCK: header.prol.loopexit:
; PROLOG-BLOCK-NEXT: %sum.0.lcssa.unr = phi i32 [ undef, %entry ], [ %load.prol, %latch.prol ]
; PROLOG-BLOCK-NEXT: %cmp = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp, label %for.exit2.loopexit, label %latch
; PROLOG-BLOCK: latch:
-; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-; PROLOG-BLOCK-NEXT: %load = load i32, i32* %arrayidx, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+; PROLOG-BLOCK-NEXT: %load = load i32, ptr %arrayidx, align 4
; PROLOG-BLOCK-NEXT: %add = add nsw i32 %load, %sum.02
; PROLOG-BLOCK-NEXT: %indvars.iv.next = add i64 %indvars.iv, 1
; PROLOG-BLOCK-NEXT: br i1 false, label %for.exit2.loopexit, label %for.exiting_block.1
; PROLOG-BLOCK-NEXT: %cmp.1 = icmp eq i64 %n, 42
; PROLOG-BLOCK-NEXT: br i1 %cmp.1, label %for.exit2.loopexit, label %latch.1
; PROLOG-BLOCK: latch.1:
-; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
-; PROLOG-BLOCK-NEXT: %load.1 = load i32, i32* %arrayidx.1, align 4
+; PROLOG-BLOCK-NEXT: %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
+; PROLOG-BLOCK-NEXT: %load.1 = load i32, ptr %arrayidx.1, align 4
; PROLOG-BLOCK-NEXT: %add.1 = add nsw i32 %load.1, %add
; PROLOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv.next, 1
; PROLOG-BLOCK-NEXT: %exitcond.1 = icmp eq i64 %indvars.iv.next.1, %n
br i1 %cmp, label %for.exit2, label %latch
latch:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %load = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %load = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %load, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
ret void
}
-declare i8 addrspace(1)* @foo(i32)
+declare ptr addrspace(1) @foo(i32)
; inner loop prolog unrolled
; a value from outer loop is used in exit block of inner loop.
; Don't create VMap entries for such values (%trip).
-define i8 addrspace(1)* @test9(i8* nocapture readonly %arg, i32 %n) {
+define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) {
; EPILOG-LABEL: @test9(
; EPILOG-NEXT: bb:
; EPILOG-NEXT: %0 = add i32 %n, -1
; EPILOG-NEXT: br label %innerexit
; EPILOG: innerexit:
; EPILOG-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ]
-; EPILOG-NEXT: %i9 = call i8 addrspace(1)* @foo(i32 %trip.lcssa)
-; EPILOG-NEXT: ret i8 addrspace(1)* %i9
+; EPILOG-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa)
+; EPILOG-NEXT: ret ptr addrspace(1) %i9
; EPILOG: latch:
; EPILOG-NEXT: %iv.next = add nuw nsw i64 %phi, 1
; EPILOG-NEXT: %niter.next = add nuw nsw i32 %niter, 1
; EPILOG-BLOCK-NEXT: br label %innerexit
; EPILOG-BLOCK: innerexit:
; EPILOG-BLOCK-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ]
-; EPILOG-BLOCK-NEXT: %i9 = call i8 addrspace(1)* @foo(i32 %trip.lcssa)
-; EPILOG-BLOCK-NEXT: ret i8 addrspace(1)* %i9
+; EPILOG-BLOCK-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa)
+; EPILOG-BLOCK-NEXT: ret ptr addrspace(1) %i9
; EPILOG-BLOCK: latch:
; EPILOG-BLOCK-NEXT: %iv.next = add nuw nsw i64 %phi, 1
; EPILOG-BLOCK-NEXT: %niter.next = add nuw nsw i32 %niter, 1
; PROLOG-NEXT: br label %innerexit
; PROLOG: innerexit:
; PROLOG-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ]
-; PROLOG-NEXT: %i9 = call i8 addrspace(1)* @foo(i32 %trip.lcssa)
-; PROLOG-NEXT: ret i8 addrspace(1)* %i9
+; PROLOG-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa)
+; PROLOG-NEXT: ret ptr addrspace(1) %i9
; PROLOG: latch:
; PROLOG-NEXT: %iv.next = add nuw nsw i64 %phi, 1
; PROLOG-NEXT: br i1 true, label %latch.1, label %innerexit.loopexit
; PROLOG-BLOCK-NEXT: br label %innerexit
; PROLOG-BLOCK: innerexit:
; PROLOG-BLOCK-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ]
-; PROLOG-BLOCK-NEXT: %i9 = call i8 addrspace(1)* @foo(i32 %trip.lcssa)
-; PROLOG-BLOCK-NEXT: ret i8 addrspace(1)* %i9
+; PROLOG-BLOCK-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa)
+; PROLOG-BLOCK-NEXT: ret ptr addrspace(1) %i9
; PROLOG-BLOCK: latch:
; PROLOG-BLOCK-NEXT: %iv.next = add nuw nsw i64 %phi, 1
; PROLOG-BLOCK-NEXT: %i7.1 = trunc i64 %iv.next to i32
br i1 true, label %latch, label %innerexit
innerexit: ; preds = %header
- %i9 = call i8 addrspace(1)* @foo(i32 %trip)
- ret i8 addrspace(1)* %i9
+ %i9 = call ptr addrspace(1) @foo(i32 %trip)
+ ret ptr addrspace(1) %i9
latch: ; preds = %header
%i11 = add nsw i32 %i7, 1
; RUN: opt < %s -S -passes=loop-unroll -unroll-runtime=true -unroll-allow-remainder=true -unroll-count=4
; Make sure that the runtime unroll does not break with a non-exiting latch.
-define i32 @test(i32* %a, i32* %b, i32* %c, i64 %n) {
+define i32 @test(ptr %a, ptr %b, ptr %c, i64 %n) {
entry:
br label %while.cond
br i1 %cmp, label %while.body, label %while.end
while.body: ; preds = %while.cond
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.0
- %0 = load i32, i32* %arrayidx
- %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.0
- %1 = load i32, i32* %arrayidx1
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %i.0
+ %0 = load i32, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i32, ptr %c, i64 %i.0
+ %1 = load i32, ptr %arrayidx1
%mul = mul nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.0
- store i32 %mul, i32* %arrayidx2
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i64 %i.0
+ store i32 %mul, ptr %arrayidx2
%inc = add nsw i64 %i.0, 1
br label %while.cond
; NOPROLOG-NOT: for.body.prol:
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
entry:
%cmp1 = icmp eq i32 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; COMMON-NOT: for.body.epil:
; COMMON-NOT: for.body.prol:
-define i32 @test1(i32* nocapture %a) nounwind uwtable readonly {
+define i32 @test1(ptr nocapture %a) nounwind uwtable readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.01
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; NOPROLOG: for.body:
; NOPROLOG-NOT: for.body.prol:
-define zeroext i16 @down(i16* nocapture %p, i32 %len) nounwind uwtable readonly {
+define zeroext i16 @down(ptr nocapture %p, i32 %len) nounwind uwtable readonly {
entry:
%cmp2 = icmp eq i32 %len, 0
br i1 %cmp2, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
- %p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %entry ]
+ %p.addr.05 = phi ptr [ %incdec.ptr, %for.body ], [ %p, %entry ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %entry ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i64 1
- %0 = load i16, i16* %p.addr.05, align 2
+ %incdec.ptr = getelementptr inbounds i16, ptr %p.addr.05, i64 1
+ %0 = load i16, ptr %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
; NOPROLOG: for.body:
; NOPROLOG-NOT: for.body.prol:
-define zeroext i16 @test2(i16* nocapture %p, i32 %len) nounwind uwtable readonly {
+define zeroext i16 @test2(ptr nocapture %p, i32 %len) nounwind uwtable readonly {
entry:
%cmp2 = icmp eq i32 %len, 0
br i1 %cmp2, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
- %p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %entry ]
+ %p.addr.05 = phi ptr [ %incdec.ptr, %for.body ], [ %p, %entry ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %entry ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i64 1
- %0 = load i16, i16* %p.addr.05, align 2
+ %incdec.ptr = getelementptr inbounds i16, ptr %p.addr.05, i64 1
+ %0 = load i16, ptr %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
; PROLOG-DAG: [[PH_LOC]] = !DILocation(line: 102, column: 1, scope: !{{.*}})
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly !dbg !6 {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly !dbg !6 {
entry:
%cmp1 = icmp eq i32 %n, 0, !dbg !7
br i1 %cmp1, label %for.end, label %for.body, !dbg !7
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv, !dbg !8
- %0 = load i32, i32* %arrayidx, align 4, !dbg !8
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv, !dbg !8
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !8
%add = add nsw i32 %0, %sum.02, !dbg !8
%indvars.iv.next = add i64 %indvars.iv, 1, !dbg !9
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !9
; PROLOG: br i1 %exitcond.3, label %for.end.loopexit{{.*}}, label %for.body
; PROLOG-NOT: br i1 %exitcond.4, label %for.end.loopexit{{.*}}, label %for.body
-define i32 @test(i32* nocapture %a, i32 %n) nounwind uwtable readonly {
+define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
entry:
%cmp1 = icmp eq i32 %n, 0
br i1 %cmp1, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; STATS: 2 loop-unroll - Number of loops unrolled (completely or otherwise)
-define i32 @nested(i32* nocapture %a, i32 %n, i32 %m) nounwind uwtable readonly {
+define i32 @nested(ptr nocapture %a, i32 %n, i32 %m) nounwind uwtable readonly {
entry:
%cmp11 = icmp sgt i32 %n, 0
br i1 %cmp11, label %for.cond1.preheader.lr.ph, label %for.end7
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.cond1.preheader ]
%sum.19 = phi i32 [ %add4, %for.body3 ], [ %sum.012, %for.cond1.preheader ]
%0 = add nsw i64 %indvars.iv, %indvars.iv16
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
- %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = load i32, ptr %arrayidx, align 4
%add4 = add nsw i32 %1, %sum.19
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; PROLOG: br i1 %lcmp.mod
; PROLOG: loop2.prol:
-define void @unroll(i32 %iter, i32* %addr1, i32* %addr2) nounwind {
+define void @unroll(i32 %iter, ptr %addr1, ptr %addr2) nounwind {
entry:
br label %loop1
loop1:
%iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
- %offset1 = getelementptr i32, i32* %addr1, i32 %iv1
- store i32 %iv1, i32* %offset1, align 4
+ %offset1 = getelementptr i32, ptr %addr1, i32 %iv1
+ store i32 %iv1, ptr %offset1, align 4
br label %loop2.header
loop2.header:
loop2:
%iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
- %offset2 = getelementptr i32, i32* %addr2, i32 %iv2
- store i32 %iv2, i32* %offset2, align 4
+ %offset2 = getelementptr i32, ptr %addr2, i32 %iv2
+ store i32 %iv2, ptr %offset2, align 4
%inc2 = add i32 %iv2, 1
%exitcnd2 = icmp uge i32 %inc2, %iter
br i1 %exitcnd2, label %exit2, label %loop2
; Given that the trip-count of this loop is a 3-bit value, we cannot
; safely unroll it with a count of anything more than 8.
-define i3 @test(i3* %a, i3 %n) {
+define i3 @test(ptr %a, i3 %n) {
; UNROLL-16-LABEL: @test(
; UNROLL-16-NEXT: entry:
; UNROLL-16-NEXT: [[CMP1:%.*]] = icmp eq i3 [[N:%.*]], 0
; UNROLL-16: for.body.preheader:
; UNROLL-16-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL-16: for.body:
-; UNROLL-16-NEXT: [[TMP0:%.*]] = load i3, i3* [[A:%.*]], align 1
+; UNROLL-16-NEXT: [[TMP0:%.*]] = load i3, ptr [[A:%.*]], align 1
; UNROLL-16-NEXT: [[EXITCOND:%.*]] = icmp eq i3 1, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_1:%.*]]
; UNROLL-16: for.body.1:
-; UNROLL-16-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 1
-; UNROLL-16-NEXT: [[TMP1:%.*]] = load i3, i3* [[ARRAYIDX_1]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 1
+; UNROLL-16-NEXT: [[TMP1:%.*]] = load i3, ptr [[ARRAYIDX_1]], align 1
; UNROLL-16-NEXT: [[ADD_1:%.*]] = add nsw i3 [[TMP1]], [[TMP0]]
; UNROLL-16-NEXT: [[EXITCOND_1:%.*]] = icmp eq i3 2, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND_1]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY_2:%.*]]
; UNROLL-16: for.body.2:
-; UNROLL-16-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 2
-; UNROLL-16-NEXT: [[TMP2:%.*]] = load i3, i3* [[ARRAYIDX_2]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 2
+; UNROLL-16-NEXT: [[TMP2:%.*]] = load i3, ptr [[ARRAYIDX_2]], align 1
; UNROLL-16-NEXT: [[ADD_2:%.*]] = add nsw i3 [[TMP2]], [[ADD_1]]
; UNROLL-16-NEXT: [[EXITCOND_2:%.*]] = icmp eq i3 3, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND_2]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY_3:%.*]]
; UNROLL-16: for.body.3:
-; UNROLL-16-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 3
-; UNROLL-16-NEXT: [[TMP3:%.*]] = load i3, i3* [[ARRAYIDX_3]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 3
+; UNROLL-16-NEXT: [[TMP3:%.*]] = load i3, ptr [[ARRAYIDX_3]], align 1
; UNROLL-16-NEXT: [[ADD_3:%.*]] = add nsw i3 [[TMP3]], [[ADD_2]]
; UNROLL-16-NEXT: [[EXITCOND_3:%.*]] = icmp eq i3 -4, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND_3]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY_4:%.*]]
; UNROLL-16: for.body.4:
-; UNROLL-16-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 4
-; UNROLL-16-NEXT: [[TMP4:%.*]] = load i3, i3* [[ARRAYIDX_4]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 4
+; UNROLL-16-NEXT: [[TMP4:%.*]] = load i3, ptr [[ARRAYIDX_4]], align 1
; UNROLL-16-NEXT: [[ADD_4:%.*]] = add nsw i3 [[TMP4]], [[ADD_3]]
; UNROLL-16-NEXT: [[EXITCOND_4:%.*]] = icmp eq i3 -3, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND_4]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY_5:%.*]]
; UNROLL-16: for.body.5:
-; UNROLL-16-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 5
-; UNROLL-16-NEXT: [[TMP5:%.*]] = load i3, i3* [[ARRAYIDX_5]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 5
+; UNROLL-16-NEXT: [[TMP5:%.*]] = load i3, ptr [[ARRAYIDX_5]], align 1
; UNROLL-16-NEXT: [[ADD_5:%.*]] = add nsw i3 [[TMP5]], [[ADD_4]]
; UNROLL-16-NEXT: [[EXITCOND_5:%.*]] = icmp eq i3 -2, [[N]]
; UNROLL-16-NEXT: br i1 [[EXITCOND_5]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY_6:%.*]]
; UNROLL-16: for.body.6:
-; UNROLL-16-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 6
-; UNROLL-16-NEXT: [[TMP6:%.*]] = load i3, i3* [[ARRAYIDX_6]], align 1
+; UNROLL-16-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 6
+; UNROLL-16-NEXT: [[TMP6:%.*]] = load i3, ptr [[ARRAYIDX_6]], align 1
; UNROLL-16-NEXT: [[ADD_6:%.*]] = add nsw i3 [[TMP6]], [[ADD_5]]
; UNROLL-16-NEXT: br label [[FOR_END_LOOPEXIT]]
; UNROLL-16: for.end.loopexit:
; UNROLL-4-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY]] ]
; UNROLL-4-NEXT: [[SUM_02:%.*]] = phi i3 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[ADD_3:%.*]], [[FOR_BODY]] ]
; UNROLL-4-NEXT: [[NITER:%.*]] = phi i3 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_BODY]] ]
-; UNROLL-4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i3, i3* [[A:%.*]], i64 [[INDVARS_IV]]
-; UNROLL-4-NEXT: [[TMP2:%.*]] = load i3, i3* [[ARRAYIDX]], align 1
+; UNROLL-4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i3, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; UNROLL-4-NEXT: [[TMP2:%.*]] = load i3, ptr [[ARRAYIDX]], align 1
; UNROLL-4-NEXT: [[ADD:%.*]] = add nsw i3 [[TMP2]], [[SUM_02]]
; UNROLL-4-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; UNROLL-4-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i3 [[NITER]], 1
-; UNROLL-4-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 [[INDVARS_IV_NEXT]]
-; UNROLL-4-NEXT: [[TMP3:%.*]] = load i3, i3* [[ARRAYIDX_1]], align 1
+; UNROLL-4-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; UNROLL-4-NEXT: [[TMP3:%.*]] = load i3, ptr [[ARRAYIDX_1]], align 1
; UNROLL-4-NEXT: [[ADD_1:%.*]] = add nsw i3 [[TMP3]], [[ADD]]
; UNROLL-4-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; UNROLL-4-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i3 [[NITER_NEXT]], 1
-; UNROLL-4-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; UNROLL-4-NEXT: [[TMP4:%.*]] = load i3, i3* [[ARRAYIDX_2]], align 1
+; UNROLL-4-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; UNROLL-4-NEXT: [[TMP4:%.*]] = load i3, ptr [[ARRAYIDX_2]], align 1
; UNROLL-4-NEXT: [[ADD_2:%.*]] = add nsw i3 [[TMP4]], [[ADD_1]]
; UNROLL-4-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; UNROLL-4-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i3 [[NITER_NEXT_1]], 1
-; UNROLL-4-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; UNROLL-4-NEXT: [[TMP5:%.*]] = load i3, i3* [[ARRAYIDX_3]], align 1
+; UNROLL-4-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; UNROLL-4-NEXT: [[TMP5:%.*]] = load i3, ptr [[ARRAYIDX_3]], align 1
; UNROLL-4-NEXT: [[ADD_3]] = add nsw i3 [[TMP5]], [[ADD_2]]
; UNROLL-4-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; UNROLL-4-NEXT: [[NITER_NEXT_3]] = add i3 [[NITER_NEXT_2]], 1
; UNROLL-4-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[INDVARS_IV_UNR]], [[FOR_BODY_EPIL_PREHEADER]] ]
; UNROLL-4-NEXT: [[SUM_02_EPIL:%.*]] = phi i3 [ [[ADD_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[SUM_02_UNR]], [[FOR_BODY_EPIL_PREHEADER]] ]
; UNROLL-4-NEXT: [[EPIL_ITER:%.*]] = phi i3 [ 0, [[FOR_BODY_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[FOR_BODY_EPIL]] ]
-; UNROLL-4-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i3, i3* [[A]], i64 [[INDVARS_IV_EPIL]]
-; UNROLL-4-NEXT: [[TMP6:%.*]] = load i3, i3* [[ARRAYIDX_EPIL]], align 1
+; UNROLL-4-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; UNROLL-4-NEXT: [[TMP6:%.*]] = load i3, ptr [[ARRAYIDX_EPIL]], align 1
; UNROLL-4-NEXT: [[ADD_EPIL]] = add nsw i3 [[TMP6]], [[SUM_02_EPIL]]
; UNROLL-4-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
; UNROLL-4-NEXT: [[LFTR_WIDEIV_EPIL:%.*]] = trunc i64 [[INDVARS_IV_NEXT_EPIL]] to i3
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i3 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i3, i3* %a, i64 %indvars.iv
- %0 = load i3, i3* %arrayidx
+ %arrayidx = getelementptr inbounds i3, ptr %a, i64 %indvars.iv
+ %0 = load i3, ptr %arrayidx
%add = add nsw i3 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i3
; functional tests with forced unroll factors.
; the second exit block is a deopt block. The loop has one exiting block other than the latch.
-define i32 @test1(i32* nocapture %a, i64 %n) {
+define i32 @test1(ptr nocapture %a, i64 %n) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = freeze i64 [[N:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT_LOOPEXIT:%.*]], label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 1
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_1:%.*]]
; CHECK-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_1]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_1:%.*]]
; CHECK: latch.1:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = or i64 [[INDVARS_IV]], 2
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_2:%.*]]
; CHECK-NEXT: [[CMP_2:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_2]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_2:%.*]]
; CHECK: latch.2:
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = or i64 [[INDVARS_IV]], 3
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_3:%.*]]
; CHECK-NEXT: [[CMP_3:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_3]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_3:%.*]]
; CHECK: latch.3:
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = or i64 [[INDVARS_IV]], 4
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_4:%.*]]
; CHECK-NEXT: [[CMP_4:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_4]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_4:%.*]]
; CHECK: latch.4:
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_3]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_3]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = or i64 [[INDVARS_IV]], 5
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_5:%.*]]
; CHECK-NEXT: [[CMP_5:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_5]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_5:%.*]]
; CHECK: latch.5:
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_4]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_4]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = or i64 [[INDVARS_IV]], 6
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_6:%.*]]
; CHECK-NEXT: [[CMP_6:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_6]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_6:%.*]]
; CHECK: latch.6:
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_5]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_5]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = or i64 [[INDVARS_IV]], 7
; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_7:%.*]]
; CHECK-NEXT: [[CMP_7:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_7]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_7]]
; CHECK: latch.7:
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_6]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_6]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8
; CHECK-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[TMP0]], 42
; CHECK-NEXT: br i1 [[CMP_EPIL]], label [[OTHEREXIT_LOOPEXIT3:%.*]], label [[LATCH_EPIL]]
; CHECK: latch.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_EPIL]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; CHECK-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
; CHECK-NEXT: [[EPIL_ITER_NEXT]] = add i64 [[EPIL_ITER]], 1
; NOUNROLL-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; NOUNROLL-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; NOUNROLL: latch:
-; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; NOUNROLL-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; NOUNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; NOUNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; ENABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP]], label [[OTHEREXIT_LOOPEXIT:%.*]], label [[LATCH:%.*]]
; ENABLED: latch:
-; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; ENABLED-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ENABLED-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ENABLED-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; ENABLED-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i64 [[NITER]], 1
; ENABLED-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_1]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_1:%.*]]
; ENABLED: latch.1:
-; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; ENABLED-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; ENABLED-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; ENABLED-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; ENABLED-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i64 [[NITER_NEXT]], 1
; ENABLED-NEXT: [[CMP_2:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_2]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_2:%.*]]
; ENABLED: latch.2:
-; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; ENABLED-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; ENABLED-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; ENABLED-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; ENABLED-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i64 [[NITER_NEXT_1]], 1
; ENABLED-NEXT: [[CMP_3:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_3]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_3:%.*]]
; ENABLED: latch.3:
-; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; ENABLED-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; ENABLED-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; ENABLED-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; ENABLED-NEXT: [[NITER_NEXT_3:%.*]] = add nuw nsw i64 [[NITER_NEXT_2]], 1
; ENABLED-NEXT: [[CMP_4:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_4]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_4:%.*]]
; ENABLED: latch.4:
-; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_3]]
-; ENABLED-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_3]]
+; ENABLED-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; ENABLED-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; ENABLED-NEXT: [[NITER_NEXT_4:%.*]] = add nuw nsw i64 [[NITER_NEXT_3]], 1
; ENABLED-NEXT: [[CMP_5:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_5]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_5:%.*]]
; ENABLED: latch.5:
-; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_4]]
-; ENABLED-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_4]]
+; ENABLED-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; ENABLED-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; ENABLED-NEXT: [[NITER_NEXT_5:%.*]] = add nuw nsw i64 [[NITER_NEXT_4]], 1
; ENABLED-NEXT: [[CMP_6:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_6]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_6:%.*]]
; ENABLED: latch.6:
-; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_5]]
-; ENABLED-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_5]]
+; ENABLED-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; ENABLED-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; ENABLED-NEXT: [[NITER_NEXT_6:%.*]] = add nuw nsw i64 [[NITER_NEXT_5]], 1
; ENABLED-NEXT: [[CMP_7:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_7]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_7]]
; ENABLED: latch.7:
-; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_6]]
-; ENABLED-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_6]]
+; ENABLED-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; ENABLED-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV_NEXT_6]], 1
; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER_NEXT_6]], 1
; ENABLED-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_EPIL]], label [[OTHEREXIT_LOOPEXIT3:%.*]], label [[LATCH_EPIL]]
; ENABLED: latch.epil:
-; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_EPIL]]
-; ENABLED-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; ENABLED-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; ENABLED-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
; ENABLED-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_EPIL]], [[N]]
br i1 %cmp, label %otherexit, label %latch
latch:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
}
; the exit block is not a deopt block.
-define i32 @test2(i32* nocapture %a, i64 %n) {
+define i32 @test2(ptr nocapture %a, i64 %n) {
;
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; NOUNROLL-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; NOUNROLL-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; NOUNROLL: latch:
-; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; NOUNROLL-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; NOUNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; NOUNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; ENABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP]], label [[OTHEREXIT_LOOPEXIT:%.*]], label [[LATCH:%.*]]
; ENABLED: latch:
-; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; ENABLED-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ENABLED-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ENABLED-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[SUM_02]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; ENABLED-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i64 [[NITER]], 1
; ENABLED-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_1]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_1:%.*]]
; ENABLED: latch.1:
-; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; ENABLED-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; ENABLED-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; ENABLED-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP4]], [[ADD]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; ENABLED-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i64 [[NITER_NEXT]], 1
; ENABLED-NEXT: [[CMP_2:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_2]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_2:%.*]]
; ENABLED: latch.2:
-; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; ENABLED-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; ENABLED-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
; ENABLED-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP5]], [[ADD_1]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; ENABLED-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i64 [[NITER_NEXT_1]], 1
; ENABLED-NEXT: [[CMP_3:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_3]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_3:%.*]]
; ENABLED: latch.3:
-; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; ENABLED-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; ENABLED-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
; ENABLED-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP6]], [[ADD_2]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; ENABLED-NEXT: [[NITER_NEXT_3:%.*]] = add nuw nsw i64 [[NITER_NEXT_2]], 1
; ENABLED-NEXT: [[CMP_4:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_4]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_4:%.*]]
; ENABLED: latch.4:
-; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_3]]
-; ENABLED-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_3]]
+; ENABLED-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_4]], align 4
; ENABLED-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP7]], [[ADD_3]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; ENABLED-NEXT: [[NITER_NEXT_4:%.*]] = add nuw nsw i64 [[NITER_NEXT_3]], 1
; ENABLED-NEXT: [[CMP_5:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_5]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_5:%.*]]
; ENABLED: latch.5:
-; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_4]]
-; ENABLED-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_4]]
+; ENABLED-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
; ENABLED-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP8]], [[ADD_4]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; ENABLED-NEXT: [[NITER_NEXT_5:%.*]] = add nuw nsw i64 [[NITER_NEXT_4]], 1
; ENABLED-NEXT: [[CMP_6:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_6]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_6:%.*]]
; ENABLED: latch.6:
-; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_5]]
-; ENABLED-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_5]]
+; ENABLED-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
; ENABLED-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP9]], [[ADD_5]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; ENABLED-NEXT: [[NITER_NEXT_6:%.*]] = add nuw nsw i64 [[NITER_NEXT_5]], 1
; ENABLED-NEXT: [[CMP_7:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_7]], label [[OTHEREXIT_LOOPEXIT]], label [[LATCH_7]]
; ENABLED: latch.7:
-; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_6]]
-; ENABLED-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_6]]
+; ENABLED-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
; ENABLED-NEXT: [[ADD_7]] = add nsw i32 [[TMP10]], [[ADD_6]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV_NEXT_6]], 1
; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER_NEXT_6]], 1
; ENABLED-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[N]], 42
; ENABLED-NEXT: br i1 [[CMP_EPIL]], label [[OTHEREXIT_LOOPEXIT2:%.*]], label [[LATCH_EPIL]]
; ENABLED: latch.epil:
-; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_EPIL]]
-; ENABLED-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
+; ENABLED-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_EPIL]]
+; ENABLED-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; ENABLED-NEXT: [[ADD_EPIL]] = add nsw i32 [[TMP11]], [[SUM_02_EPIL]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add i64 [[INDVARS_IV_EPIL]], 1
; ENABLED-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_EPIL]], [[N]]
br i1 %cmp, label %otherexit, label %latch
latch:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; A multiple exit loop with an estimated trip count which is small, and thus
; the loop is not worth unrolling. We probably should peel said loop, but
; currently don't.
-define i32 @test3(i32* nocapture %a, i64 %n) !prof !{!"function_entry_count", i64 2048} {
+define i32 @test3(ptr nocapture %a, i64 %n) !prof !{!"function_entry_count", i64 2048} {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; NOUNROLL-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; NOUNROLL-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; NOUNROLL: latch:
-; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; NOUNROLL-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; NOUNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; NOUNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; ENABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 42
; ENABLED-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; ENABLED: latch:
-; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; ENABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ENABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ENABLED-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; ENABLED-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
br i1 %cmp, label %otherexit, label %latch
latch:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
; A case noticed while writing test3 where changing the early exit condition
; seems to inhibit unrolling for some unclear reason.
-define i32 @test4(i32* nocapture %a, i64 %n) !prof !{!"function_entry_count", i64 2048} {
+define i32 @test4(ptr nocapture %a, i64 %n) !prof !{!"function_entry_count", i64 2048} {
;
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[INDVARS_IV]], 4096
; CHECK-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; NOUNROLL-NEXT: [[CMP:%.*]] = icmp eq i64 [[INDVARS_IV]], 4096
; NOUNROLL-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; NOUNROLL: latch:
-; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NOUNROLL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; NOUNROLL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; NOUNROLL-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; NOUNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; NOUNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; ENABLED-NEXT: [[CMP:%.*]] = icmp eq i64 [[INDVARS_IV]], 4096
; ENABLED-NEXT: br i1 [[CMP]], label [[OTHEREXIT:%.*]], label [[LATCH]]
; ENABLED: latch:
-; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
-; ENABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; ENABLED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
+; ENABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ENABLED-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[SUM_02]]
; ENABLED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; ENABLED-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
br i1 %cmp, label %otherexit, label %latch
latch:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %n
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
@global = dso_local local_unnamed_addr global i32 0, align 4
-@global.1 = dso_local local_unnamed_addr global i8* null, align 4
+@global.1 = dso_local local_unnamed_addr global ptr null, align 4
; Check that loop in hoge_3, with a runtime upperbound of 3, is not unrolled.
define dso_local void @hoge_3(i8 %arg) {
; CHECK-LABEL: @hoge_3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* @global, align 4
-; CHECK-NEXT: [[Y:%.*]] = load i8*, i8** @global.1, align 4
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr @global, align 4
+; CHECK-NEXT: [[Y:%.*]] = load ptr, ptr @global.1, align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X]], 17
; CHECK-NEXT: br i1 [[TMP0]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[X]], [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi i8* [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 8
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, i8* [[PTR]], i32 1
-; CHECK-NEXT: store i8 [[ARG:%.*]], i8* [[PTR_NEXT]], align 1
+; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i32 1
+; CHECK-NEXT: store i8 [[ARG:%.*]], ptr [[PTR_NEXT]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[IV_NEXT]], 17
; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
;
; UPPER-LABEL: @hoge_3(
; UPPER-NEXT: entry:
-; UPPER-NEXT: [[X:%.*]] = load i32, i32* @global, align 4
-; UPPER-NEXT: [[Y:%.*]] = load i8*, i8** @global.1, align 4
+; UPPER-NEXT: [[X:%.*]] = load i32, ptr @global, align 4
+; UPPER-NEXT: [[Y:%.*]] = load ptr, ptr @global.1, align 4
; UPPER-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X]], 17
; UPPER-NEXT: br i1 [[TMP0]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; UPPER: loop.preheader:
; UPPER-NEXT: br label [[LOOP:%.*]]
; UPPER: loop:
; UPPER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[X]], [[LOOP_PREHEADER]] ]
-; UPPER-NEXT: [[PTR:%.*]] = phi i8* [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
+; UPPER-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
; UPPER-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 8
-; UPPER-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, i8* [[PTR]], i32 1
-; UPPER-NEXT: store i8 [[ARG:%.*]], i8* [[PTR_NEXT]], align 1
+; UPPER-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i32 1
+; UPPER-NEXT: store i8 [[ARG:%.*]], ptr [[PTR_NEXT]], align 1
; UPPER-NEXT: [[TMP1:%.*]] = icmp ult i32 [[IV_NEXT]], 17
; UPPER-NEXT: br i1 [[TMP1]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; UPPER: exit.loopexit:
; UPPER-NEXT: ret void
;
entry:
- %x = load i32, i32* @global, align 4
- %y = load i8*, i8** @global.1, align 4
+ %x = load i32, ptr @global, align 4
+ %y = load ptr, ptr @global.1, align 4
%0 = icmp ult i32 %x, 17
br i1 %0, label %loop, label %exit
loop:
%iv = phi i32 [ %x, %entry ], [ %iv.next, %loop ]
- %ptr = phi i8* [ %y, %entry ], [ %ptr.next, %loop ]
+ %ptr = phi ptr [ %y, %entry ], [ %ptr.next, %loop ]
%iv.next = add nuw i32 %iv, 8
- %ptr.next = getelementptr inbounds i8, i8* %ptr, i32 1
- store i8 %arg, i8* %ptr.next, align 1
+ %ptr.next = getelementptr inbounds i8, ptr %ptr, i32 1
+ store i8 %arg, ptr %ptr.next, align 1
%1 = icmp ult i32 %iv.next, 17
br i1 %1, label %loop, label %exit
define dso_local void @hoge_5(i8 %arg) {
; CHECK-LABEL: @hoge_5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X:%.*]] = load i32, i32* @global, align 4
-; CHECK-NEXT: [[Y:%.*]] = load i8*, i8** @global.1, align 4
+; CHECK-NEXT: [[X:%.*]] = load i32, ptr @global, align 4
+; CHECK-NEXT: [[Y:%.*]] = load ptr, ptr @global.1, align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X]], 17
; CHECK-NEXT: br i1 [[TMP0]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[X]], [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi i8* [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP]] ], [ [[Y]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 4
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, i8* [[PTR]], i32 1
-; CHECK-NEXT: store i8 [[ARG:%.*]], i8* [[PTR_NEXT]], align 1
+; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i32 1
+; CHECK-NEXT: store i8 [[ARG:%.*]], ptr [[PTR_NEXT]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[IV_NEXT]], 17
; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
;
; UPPER-LABEL: @hoge_5(
; UPPER-NEXT: entry:
-; UPPER-NEXT: [[X:%.*]] = load i32, i32* @global, align 4
-; UPPER-NEXT: [[Y:%.*]] = load i8*, i8** @global.1, align 4
+; UPPER-NEXT: [[X:%.*]] = load i32, ptr @global, align 4
+; UPPER-NEXT: [[Y:%.*]] = load ptr, ptr @global.1, align 4
; UPPER-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X]], 17
; UPPER-NEXT: br i1 [[TMP0]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; UPPER: loop.preheader:
; UPPER-NEXT: br label [[LOOP:%.*]]
; UPPER: loop:
; UPPER-NEXT: [[IV_NEXT:%.*]] = add nuw i32 [[X]], 4
-; UPPER-NEXT: [[PTR_NEXT:%.*]] = getelementptr inbounds i8, i8* [[Y]], i32 1
-; UPPER-NEXT: store i8 [[ARG:%.*]], i8* [[PTR_NEXT]], align 1
+; UPPER-NEXT: [[PTR_NEXT:%.*]] = getelementptr inbounds i8, ptr [[Y]], i32 1
+; UPPER-NEXT: store i8 [[ARG:%.*]], ptr [[PTR_NEXT]], align 1
; UPPER-NEXT: [[TMP1:%.*]] = icmp ult i32 [[IV_NEXT]], 17
; UPPER-NEXT: br i1 [[TMP1]], label [[LOOP_1:%.*]], label [[EXIT_LOOPEXIT:%.*]]
; UPPER: loop.1:
; UPPER-NEXT: [[IV_NEXT_1:%.*]] = add nuw i32 [[IV_NEXT]], 4
-; UPPER-NEXT: [[PTR_NEXT_1:%.*]] = getelementptr inbounds i8, i8* [[PTR_NEXT]], i32 1
-; UPPER-NEXT: store i8 [[ARG]], i8* [[PTR_NEXT_1]], align 1
+; UPPER-NEXT: [[PTR_NEXT_1:%.*]] = getelementptr inbounds i8, ptr [[PTR_NEXT]], i32 1
+; UPPER-NEXT: store i8 [[ARG]], ptr [[PTR_NEXT_1]], align 1
; UPPER-NEXT: [[TMP2:%.*]] = icmp ult i32 [[IV_NEXT_1]], 17
; UPPER-NEXT: br i1 [[TMP2]], label [[LOOP_2:%.*]], label [[EXIT_LOOPEXIT]]
; UPPER: loop.2:
; UPPER-NEXT: [[IV_NEXT_2:%.*]] = add nuw i32 [[IV_NEXT_1]], 4
-; UPPER-NEXT: [[PTR_NEXT_2:%.*]] = getelementptr inbounds i8, i8* [[PTR_NEXT_1]], i32 1
-; UPPER-NEXT: store i8 [[ARG]], i8* [[PTR_NEXT_2]], align 1
+; UPPER-NEXT: [[PTR_NEXT_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_NEXT_1]], i32 1
+; UPPER-NEXT: store i8 [[ARG]], ptr [[PTR_NEXT_2]], align 1
; UPPER-NEXT: [[TMP3:%.*]] = icmp ult i32 [[IV_NEXT_2]], 17
; UPPER-NEXT: br i1 [[TMP3]], label [[LOOP_3:%.*]], label [[EXIT_LOOPEXIT]]
; UPPER: loop.3:
; UPPER-NEXT: [[IV_NEXT_3:%.*]] = add nuw i32 [[IV_NEXT_2]], 4
-; UPPER-NEXT: [[PTR_NEXT_3:%.*]] = getelementptr inbounds i8, i8* [[PTR_NEXT_2]], i32 1
-; UPPER-NEXT: store i8 [[ARG]], i8* [[PTR_NEXT_3]], align 1
+; UPPER-NEXT: [[PTR_NEXT_3:%.*]] = getelementptr inbounds i8, ptr [[PTR_NEXT_2]], i32 1
+; UPPER-NEXT: store i8 [[ARG]], ptr [[PTR_NEXT_3]], align 1
; UPPER-NEXT: [[TMP4:%.*]] = icmp ult i32 [[IV_NEXT_3]], 17
; UPPER-NEXT: br i1 [[TMP4]], label [[LOOP_4:%.*]], label [[EXIT_LOOPEXIT]]
; UPPER: loop.4:
; UPPER-NEXT: [[IV_NEXT_4:%.*]] = add nuw i32 [[IV_NEXT_3]], 4
-; UPPER-NEXT: [[PTR_NEXT_4:%.*]] = getelementptr inbounds i8, i8* [[PTR_NEXT_3]], i32 1
-; UPPER-NEXT: store i8 [[ARG]], i8* [[PTR_NEXT_4]], align 1
+; UPPER-NEXT: [[PTR_NEXT_4:%.*]] = getelementptr inbounds i8, ptr [[PTR_NEXT_3]], i32 1
+; UPPER-NEXT: store i8 [[ARG]], ptr [[PTR_NEXT_4]], align 1
; UPPER-NEXT: [[TMP5:%.*]] = icmp ult i32 [[IV_NEXT_4]], 17
; UPPER-NEXT: br i1 [[TMP5]], label [[LOOP_5:%.*]], label [[EXIT_LOOPEXIT]]
; UPPER: loop.5:
-; UPPER-NEXT: [[PTR_NEXT_5:%.*]] = getelementptr inbounds i8, i8* [[PTR_NEXT_4]], i32 1
-; UPPER-NEXT: store i8 [[ARG]], i8* [[PTR_NEXT_5]], align 1
+; UPPER-NEXT: [[PTR_NEXT_5:%.*]] = getelementptr inbounds i8, ptr [[PTR_NEXT_4]], i32 1
+; UPPER-NEXT: store i8 [[ARG]], ptr [[PTR_NEXT_5]], align 1
; UPPER-NEXT: br label [[EXIT_LOOPEXIT]]
; UPPER: exit.loopexit:
; UPPER-NEXT: br label [[EXIT]]
; UPPER-NEXT: ret void
;
entry:
- %x = load i32, i32* @global, align 4
- %y = load i8*, i8** @global.1, align 4
+ %x = load i32, ptr @global, align 4
+ %y = load ptr, ptr @global.1, align 4
%0 = icmp ult i32 %x, 17
br i1 %0, label %loop, label %exit
loop:
%iv = phi i32 [ %x, %entry ], [ %iv.next, %loop ]
- %ptr = phi i8* [ %y, %entry ], [ %ptr.next, %loop ]
+ %ptr = phi ptr [ %y, %entry ], [ %ptr.next, %loop ]
%iv.next = add nuw i32 %iv, 4
- %ptr.next = getelementptr inbounds i8, i8* %ptr, i32 1
- store i8 %arg, i8* %ptr.next, align 1
+ %ptr.next = getelementptr inbounds i8, ptr %ptr, i32 1
+ store i8 %arg, ptr %ptr.next, align 1
%1 = icmp ult i32 %iv.next, 17
br i1 %1, label %loop, label %exit
; Make sure the loop is unrolled without a remainder loop based on an assumption
; that the least significant bit is known to be zero.
-define dso_local void @assumeDivisibleTC(i8* noalias nocapture %a, i8* noalias nocapture readonly %b, i32 %p, i32 %q) local_unnamed_addr {
+define dso_local void @assumeDivisibleTC(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i32 %p, i32 %q) local_unnamed_addr {
; CHECK-LABEL: @assumeDivisibleTC(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AND:%.*]] = and i32 [[P:%.*]], 1
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[B:%.*]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_011]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP0]], 3
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i32 [[I_011]]
-; CHECK-NEXT: store i8 [[ADD]], i8* [[ARRAYIDX4]], align 1
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[I_011]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX4]], align 1
; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i32 [[I_011]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 [[INC]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
; CHECK-NEXT: [[ADD_1:%.*]] = add i8 [[TMP1]], 3
-; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 [[INC]]
-; CHECK-NEXT: store i8 [[ADD_1]], i8* [[ARRAYIDX4_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX4_1]], align 1
; CHECK-NEXT: [[INC_1]] = add nuw nsw i32 [[INC]], 1
; CHECK-NEXT: [[CMP1_1:%.*]] = icmp slt i32 [[INC_1]], [[N]]
; CHECK-NEXT: br i1 [[CMP1_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
for.body:
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %guarded ]
- %arrayidx = getelementptr inbounds i8, i8* %b, i32 %i.011
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i32 %i.011
+ %0 = load i8, ptr %arrayidx, align 1
%add = add i8 %0, 3
- %arrayidx4 = getelementptr inbounds i8, i8* %a, i32 %i.011
- store i8 %add, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %a, i32 %i.011
+ store i8 %add, ptr %arrayidx4, align 1
%inc = add nuw nsw i32 %i.011, 1
%cmp1 = icmp slt i32 %inc, %n
br i1 %cmp1, label %for.body, label %exit
; Make sure the loop is unrolled with a remainder loop when the trip-count
; is not provably divisible by the unroll factor.
-define dso_local void @cannotProveDivisibleTC(i8* noalias nocapture %a, i8* noalias nocapture readonly %b, i32 %p, i32 %q) local_unnamed_addr {
+define dso_local void @cannotProveDivisibleTC(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i32 %p, i32 %q) local_unnamed_addr {
; CHECK-LABEL: @cannotProveDivisibleTC(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AND:%.*]] = and i32 [[P:%.*]], 6
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[B:%.*]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[I_011]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP2]], 3
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i32 [[I_011]]
-; CHECK-NEXT: store i8 [[ADD]], i8* [[ARRAYIDX4]], align 1
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[I_011]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX4]], align 1
; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i32 [[I_011]], 1
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i32 [[NITER]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 [[INC]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[INC]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
; CHECK-NEXT: [[ADD_1:%.*]] = add i8 [[TMP3]], 3
-; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 [[INC]]
-; CHECK-NEXT: store i8 [[ADD_1]], i8* [[ARRAYIDX4_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INC]]
+; CHECK-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX4_1]], align 1
; CHECK-NEXT: [[INC_1]] = add nuw nsw i32 [[INC]], 1
; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER_NEXT]], 1
; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp ne i32 [[NITER_NEXT_1]], [[UNROLL_ITER]]
; CHECK: for.body.epil.preheader:
; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]]
; CHECK: for.body.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i8, i8* [[B]], i32 [[I_011_UNR]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[ARRAYIDX_EPIL]], align 1
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011_UNR]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_EPIL]], align 1
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add i8 [[TMP4]], 3
-; CHECK-NEXT: [[ARRAYIDX4_EPIL:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 [[I_011_UNR]]
-; CHECK-NEXT: store i8 [[ADD_EPIL]], i8* [[ARRAYIDX4_EPIL]], align 1
+; CHECK-NEXT: [[ARRAYIDX4_EPIL:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011_UNR]]
+; CHECK-NEXT: store i8 [[ADD_EPIL]], ptr [[ARRAYIDX4_EPIL]], align 1
; CHECK-NEXT: br label [[EXIT_LOOPEXIT]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
for.body:
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %guarded ]
- %arrayidx = getelementptr inbounds i8, i8* %b, i32 %i.011
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i32 %i.011
+ %0 = load i8, ptr %arrayidx, align 1
%add = add i8 %0, 3
- %arrayidx4 = getelementptr inbounds i8, i8* %a, i32 %i.011
- store i8 %add, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %a, i32 %i.011
+ store i8 %add, ptr %arrayidx4, align 1
%inc = add nuw nsw i32 %i.011, 1
%cmp1 = icmp slt i32 %inc, %n
br i1 %cmp1, label %for.body, label %exit
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -passes=loop-unroll,instcombine -unroll-runtime=true -unroll-count=4 -unroll-remainder | FileCheck %s
-define i32 @unroll(i32* nocapture readonly %a, i32* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
;
; CHECK-LABEL: @unroll(
; CHECK-NEXT: entry:
; CHECK: for.body.epil.preheader:
; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]]
; CHECK: for.body.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_UNR]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_UNR]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_UNR]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL]], align 4
; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul nsw i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nsw i32 [[MUL_EPIL]], [[C_010_UNR]]
; CHECK-NEXT: [[EPIL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 1
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA:%.*]], label [[FOR_BODY_EPIL_1:%.*]]
; CHECK: for.body.epil.1:
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 1
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_EPIL]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT_EPIL]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX2_EPIL_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL_1]], align 4
; CHECK-NEXT: [[MUL_EPIL_1:%.*]] = mul nsw i32 [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[ADD_EPIL_1:%.*]] = add nsw i32 [[MUL_EPIL_1]], [[ADD_EPIL]]
; CHECK-NEXT: [[EPIL_ITER_CMP_1_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 2
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]], label [[FOR_BODY_EPIL_2:%.*]]
; CHECK: for.body.epil.2:
; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 2
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX2_EPIL_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL_1]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL_2]], align 4
; CHECK-NEXT: [[MUL_EPIL_2:%.*]] = mul nsw i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[ADD_EPIL_2:%.*]] = add nsw i32 [[MUL_EPIL_2]], [[ADD_EPIL_1]]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[INDVARS_IV_NEXT_3]], [[FOR_BODY]] ]
; CHECK-NEXT: [[C_010:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[ADD_3]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[MUL]], [[C_010]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = mul nsw i32 [[TMP10]], [[TMP9]]
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[MUL_1]], [[ADD]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = or i64 [[INDVARS_IV]], 2
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT_1]]
-; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_1]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[TMP12]], [[TMP11]]
; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[MUL_2]], [[ADD_1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = or i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT_2]]
-; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_2]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[MUL_3:%.*]] = mul nsw i32 [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[ADD_3]] = add nsw i32 [[MUL_3]], [[ADD_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV]], 4
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%c.010 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, %c.010
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; tests may check that SCEV is properly invalidated between passes.
; Completely unroll loops without a canonical IV.
-define i32 @sansCanonical(i32* %base) nounwind {
+define i32 @sansCanonical(ptr %base) nounwind {
; CHECK-LABEL: @sansCanonical(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[ADR:%.*]] = getelementptr inbounds i32, i32* [[BASE:%.*]], i64 9
-; CHECK-NEXT: [[TMP:%.*]] = load i32, i32* [[ADR]], align 8
-; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 8
-; CHECK-NEXT: [[TMP_1:%.*]] = load i32, i32* [[ADR_1]], align 8
+; CHECK-NEXT: [[ADR:%.*]] = getelementptr inbounds i32, ptr [[BASE:%.*]], i64 9
+; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ADR]], align 8
+; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 8
+; CHECK-NEXT: [[TMP_1:%.*]] = load i32, ptr [[ADR_1]], align 8
; CHECK-NEXT: [[SUM_NEXT_1:%.*]] = add i32 [[TMP]], [[TMP_1]]
-; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 7
-; CHECK-NEXT: [[TMP_2:%.*]] = load i32, i32* [[ADR_2]], align 8
+; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 7
+; CHECK-NEXT: [[TMP_2:%.*]] = load i32, ptr [[ADR_2]], align 8
; CHECK-NEXT: [[SUM_NEXT_2:%.*]] = add i32 [[SUM_NEXT_1]], [[TMP_2]]
-; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 6
-; CHECK-NEXT: [[TMP_3:%.*]] = load i32, i32* [[ADR_3]], align 8
+; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 6
+; CHECK-NEXT: [[TMP_3:%.*]] = load i32, ptr [[ADR_3]], align 8
; CHECK-NEXT: [[SUM_NEXT_3:%.*]] = add i32 [[SUM_NEXT_2]], [[TMP_3]]
-; CHECK-NEXT: [[ADR_4:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 5
-; CHECK-NEXT: [[TMP_4:%.*]] = load i32, i32* [[ADR_4]], align 8
+; CHECK-NEXT: [[ADR_4:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 5
+; CHECK-NEXT: [[TMP_4:%.*]] = load i32, ptr [[ADR_4]], align 8
; CHECK-NEXT: [[SUM_NEXT_4:%.*]] = add i32 [[SUM_NEXT_3]], [[TMP_4]]
-; CHECK-NEXT: [[ADR_5:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 4
-; CHECK-NEXT: [[TMP_5:%.*]] = load i32, i32* [[ADR_5]], align 8
+; CHECK-NEXT: [[ADR_5:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 4
+; CHECK-NEXT: [[TMP_5:%.*]] = load i32, ptr [[ADR_5]], align 8
; CHECK-NEXT: [[SUM_NEXT_5:%.*]] = add i32 [[SUM_NEXT_4]], [[TMP_5]]
-; CHECK-NEXT: [[ADR_6:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 3
-; CHECK-NEXT: [[TMP_6:%.*]] = load i32, i32* [[ADR_6]], align 8
+; CHECK-NEXT: [[ADR_6:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 3
+; CHECK-NEXT: [[TMP_6:%.*]] = load i32, ptr [[ADR_6]], align 8
; CHECK-NEXT: [[SUM_NEXT_6:%.*]] = add i32 [[SUM_NEXT_5]], [[TMP_6]]
-; CHECK-NEXT: [[ADR_7:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 2
-; CHECK-NEXT: [[TMP_7:%.*]] = load i32, i32* [[ADR_7]], align 8
+; CHECK-NEXT: [[ADR_7:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 2
+; CHECK-NEXT: [[TMP_7:%.*]] = load i32, ptr [[ADR_7]], align 8
; CHECK-NEXT: [[SUM_NEXT_7:%.*]] = add i32 [[SUM_NEXT_6]], [[TMP_7]]
-; CHECK-NEXT: [[ADR_8:%.*]] = getelementptr inbounds i32, i32* [[BASE]], i64 1
-; CHECK-NEXT: [[TMP_8:%.*]] = load i32, i32* [[ADR_8]], align 8
+; CHECK-NEXT: [[ADR_8:%.*]] = getelementptr inbounds i32, ptr [[BASE]], i64 1
+; CHECK-NEXT: [[TMP_8:%.*]] = load i32, ptr [[ADR_8]], align 8
; CHECK-NEXT: [[SUM_NEXT_8:%.*]] = add i32 [[SUM_NEXT_7]], [[TMP_8]]
; CHECK-NEXT: ret i32 [[SUM_NEXT_8]]
;
%iv = phi i64 [ 10, %entry ], [ %iv.next, %while.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.next, %while.body ]
%iv.next = add i64 %iv, -1
- %adr = getelementptr inbounds i32, i32* %base, i64 %iv.next
- %tmp = load i32, i32* %adr, align 8
+ %adr = getelementptr inbounds i32, ptr %base, i64 %iv.next
+ %tmp = load i32, ptr %adr, align 8
%sum.next = add i32 %sum, %tmp
%iv.narrow = trunc i64 %iv.next to i32
%cmp.i65 = icmp sgt i32 %iv.narrow, 0
; SCEV unrolling properly handles loops with multiple exits. In this
; case, the computed trip count based on a canonical IV is *not* for a
; latch block.
-define i64 @earlyLoopTest(i64* %base) nounwind {
+define i64 @earlyLoopTest(ptr %base) nounwind {
; CHECK-LABEL: @earlyLoopTest(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[VAL:%.*]] = load i64, i64* [[BASE:%.*]], align 4
+; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[BASE:%.*]], align 4
; CHECK-NEXT: br label [[TAIL:%.*]]
; CHECK: tail:
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[VAL]], 0
; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_1:%.*]], label [[EXIT2:%.*]]
; CHECK: loop.1:
-; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i64, i64* [[BASE]], i64 1
-; CHECK-NEXT: [[VAL_1:%.*]] = load i64, i64* [[ADR_1]], align 4
+; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i64, ptr [[BASE]], i64 1
+; CHECK-NEXT: [[VAL_1:%.*]] = load i64, ptr [[ADR_1]], align 4
; CHECK-NEXT: [[S_NEXT_1:%.*]] = add i64 [[VAL]], [[VAL_1]]
; CHECK-NEXT: br label [[TAIL_1:%.*]]
; CHECK: tail.1:
; CHECK-NEXT: [[CMP2_1:%.*]] = icmp ne i64 [[VAL_1]], 0
; CHECK-NEXT: br i1 [[CMP2_1]], label [[LOOP_2:%.*]], label [[EXIT2]]
; CHECK: loop.2:
-; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i64, i64* [[BASE]], i64 2
-; CHECK-NEXT: [[VAL_2:%.*]] = load i64, i64* [[ADR_2]], align 4
+; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i64, ptr [[BASE]], i64 2
+; CHECK-NEXT: [[VAL_2:%.*]] = load i64, ptr [[ADR_2]], align 4
; CHECK-NEXT: [[S_NEXT_2:%.*]] = add i64 [[S_NEXT_1]], [[VAL_2]]
; CHECK-NEXT: br label [[TAIL_2:%.*]]
; CHECK: tail.2:
; CHECK-NEXT: [[CMP2_2:%.*]] = icmp ne i64 [[VAL_2]], 0
; CHECK-NEXT: br i1 [[CMP2_2]], label [[LOOP_3:%.*]], label [[EXIT2]]
; CHECK: loop.3:
-; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i64, i64* [[BASE]], i64 3
-; CHECK-NEXT: [[VAL_3:%.*]] = load i64, i64* [[ADR_3]], align 4
+; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i64, ptr [[BASE]], i64 3
+; CHECK-NEXT: [[VAL_3:%.*]] = load i64, ptr [[ADR_3]], align 4
; CHECK-NEXT: [[S_NEXT_3:%.*]] = add i64 [[S_NEXT_2]], [[VAL_3]]
; CHECK-NEXT: br i1 false, label [[TAIL_3:%.*]], label [[EXIT1:%.*]]
; CHECK: tail.3:
loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %tail ]
%s = phi i64 [ 0, %entry ], [ %s.next, %tail ]
- %adr = getelementptr i64, i64* %base, i64 %iv
- %val = load i64, i64* %adr
+ %adr = getelementptr i64, ptr %base, i64 %iv
+ %val = load i64, ptr %adr
%s.next = add i64 %s, %val
%inc = add i64 %iv, 1
%cmp = icmp ne i64 %inc, 4
}
; SCEV properly unrolls multi-exit loops.
-define i32 @multiExit(i32* %base) nounwind {
+define i32 @multiExit(ptr %base) nounwind {
; CHECK-LABEL: @multiExit(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L1:%.*]]
; CHECK: l1:
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[BASE:%.*]], align 4
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[BASE:%.*]], align 4
; CHECK-NEXT: br i1 false, label [[L2:%.*]], label [[EXIT1:%.*]]
; CHECK: l2:
; CHECK-NEXT: ret i32 [[VAL]]
%iv2 = phi i32 [ 0, %entry ], [ %inc2, %l2 ]
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
- %adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32, i32* %adr
+ %adr = getelementptr i32, ptr %base, i32 %iv1
+ %val = load i32, ptr %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
; SCEV can unroll a multi-exit loops even if the latch block has no
; known trip count, but an early exit has a known trip count. In this
; case we must be careful not to optimize the latch branch away.
-define i32 @multiExitIncomplete(i32* %base) nounwind {
+define i32 @multiExitIncomplete(ptr %base) nounwind {
; CHECK-LABEL: @multiExitIncomplete(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L1:%.*]]
; CHECK: l1:
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[BASE:%.*]], align 4
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[BASE:%.*]], align 4
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK: l2:
; CHECK-NEXT: br label [[L3:%.*]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i32 [[VAL]], 0
; CHECK-NEXT: br i1 [[CMP3]], label [[L1_1:%.*]], label [[EXIT3:%.*]]
; CHECK: l1.1:
-; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i32, i32* [[BASE]], i32 1
-; CHECK-NEXT: [[VAL_1:%.*]] = load i32, i32* [[ADR_1]], align 4
+; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i32, ptr [[BASE]], i32 1
+; CHECK-NEXT: [[VAL_1:%.*]] = load i32, ptr [[ADR_1]], align 4
; CHECK-NEXT: br label [[L2_1:%.*]]
; CHECK: l2.1:
; CHECK-NEXT: br label [[L3_1:%.*]]
; CHECK-NEXT: [[CMP3_1:%.*]] = icmp ne i32 [[VAL_1]], 0
; CHECK-NEXT: br i1 [[CMP3_1]], label [[L1_2:%.*]], label [[EXIT3]]
; CHECK: l1.2:
-; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i32, i32* [[BASE]], i32 2
-; CHECK-NEXT: [[VAL_2:%.*]] = load i32, i32* [[ADR_2]], align 4
+; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i32, ptr [[BASE]], i32 2
+; CHECK-NEXT: [[VAL_2:%.*]] = load i32, ptr [[ADR_2]], align 4
; CHECK-NEXT: br label [[L2_2:%.*]]
; CHECK: l2.2:
; CHECK-NEXT: br label [[L3_2:%.*]]
; CHECK-NEXT: [[CMP3_2:%.*]] = icmp ne i32 [[VAL_2]], 0
; CHECK-NEXT: br i1 [[CMP3_2]], label [[L1_3:%.*]], label [[EXIT3]]
; CHECK: l1.3:
-; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i32, i32* [[BASE]], i32 3
-; CHECK-NEXT: [[VAL_3:%.*]] = load i32, i32* [[ADR_3]], align 4
+; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i32, ptr [[BASE]], i32 3
+; CHECK-NEXT: [[VAL_3:%.*]] = load i32, ptr [[ADR_3]], align 4
; CHECK-NEXT: br label [[L2_3:%.*]]
; CHECK: l2.3:
; CHECK-NEXT: br label [[L3_3:%.*]]
; CHECK-NEXT: [[CMP3_3:%.*]] = icmp ne i32 [[VAL_3]], 0
; CHECK-NEXT: br i1 [[CMP3_3]], label [[L1_4:%.*]], label [[EXIT3]]
; CHECK: l1.4:
-; CHECK-NEXT: [[ADR_4:%.*]] = getelementptr i32, i32* [[BASE]], i32 4
-; CHECK-NEXT: [[VAL_4:%.*]] = load i32, i32* [[ADR_4]], align 4
+; CHECK-NEXT: [[ADR_4:%.*]] = getelementptr i32, ptr [[BASE]], i32 4
+; CHECK-NEXT: [[VAL_4:%.*]] = load i32, ptr [[ADR_4]], align 4
; CHECK-NEXT: br label [[L2_4:%.*]]
; CHECK: l2.4:
; CHECK-NEXT: br label [[L3_4:%.*]]
%iv2 = phi i32 [ 0, %entry ], [ %inc2, %l3 ]
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
- %adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32, i32* %adr
+ %adr = getelementptr i32, ptr %base, i32 %iv1
+ %val = load i32, ptr %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
; iteration via the early exit. So loop unrolling cannot assume that
; the loop latch's exit count of zero is an upper bound on the number
; of iterations.
-define void @nsw_latch(i32* %a) nounwind {
+define void @nsw_latch(ptr %a) nounwind {
; CHECK-LABEL: @nsw_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: return:
; CHECK-NEXT: [[B_03_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND]] ], [ 8, [[FOR_BODY_1]] ], [ 0, [[FOR_COND_1]] ]
; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 0, [[FOR_COND]] ], [ 1, [[FOR_BODY_1]] ], [ 0, [[FOR_COND_1]] ]
-; CHECK-NEXT: store i32 [[B_03_LCSSA]], i32* [[A:%.*]], align 4
+; CHECK-NEXT: store i32 [[B_03_LCSSA]], ptr [[A:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
return: ; preds = %for.body, %for.cond
%b.03.lcssa = phi i32 [ %b.03, %for.body ], [ %b.03, %for.cond ]
%retval.0 = phi i32 [ 1, %for.body ], [ 0, %for.cond ]
- store i32 %b.03.lcssa, i32* %a, align 4
+ store i32 %b.03.lcssa, ptr %a, align 4
ret void
}
; Test case for PR56044. Check that SCEVs for exit phis are properly invalidated.
-define i32 @test_pr56044(i64* %src, i32 %a) {
+define i32 @test_pr56044(ptr %src, i32 %a) {
; CHECK-LABEL: @test_pr56044(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_1_PEEL_BEGIN:%.*]]
; CHECK-NEXT: br label [[LOOP_1_PEEL:%.*]]
; CHECK: loop.1.peel:
; CHECK-NEXT: call void @fn(i32 5)
-; CHECK-NEXT: [[L_PEEL:%.*]] = load i64, i64* [[SRC:%.*]], align 8
+; CHECK-NEXT: [[L_PEEL:%.*]] = load i64, ptr [[SRC:%.*]], align 8
; CHECK-NEXT: [[ADD_PEEL:%.*]] = add i64 [[L_PEEL]], [[L_PEEL]]
; CHECK-NEXT: [[EC_1_PEEL:%.*]] = icmp sgt i32 [[A:%.*]], 4
; CHECK-NEXT: br i1 [[EC_1_PEEL]], label [[MID:%.*]], label [[LOOP_1_PEEL_NEXT:%.*]]
; CHECK-NEXT: br label [[LOOP_1:%.*]]
; CHECK: loop.1:
; CHECK-NEXT: call void @fn(i32 18)
-; CHECK-NEXT: [[L:%.*]] = load i64, i64* [[SRC]], align 8
+; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[SRC]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[L]], [[L]]
; CHECK-NEXT: [[EC_1:%.*]] = icmp sgt i32 [[A]], 4
; CHECK-NEXT: br i1 [[EC_1]], label [[MID_LOOPEXIT:%.*]], label [[LOOP_1]], !llvm.loop [[LOOP0:![0-9]+]]
loop.1:
%p.1 = phi i32 [ 5, %entry ], [ 18, %loop.1 ]
call void @fn(i32 %p.1)
- %l = load i64, i64* %src, align 8
+ %l = load i64, ptr %src, align 8
%add = add i64 %l, %l
%ec.1 = icmp sgt i32 %a, 4
br i1 %ec.1, label %mid, label %loop.1
; RUN: opt < %s -passes=loop-unroll -unroll-count=2 -S | FileCheck %s
; LoopUnroll should unroll this loop into one big basic block.
; @latch_exit: each iteration computes p[i] *= p[i+1] (loads p[i+1] and
; p[i], multiplies, stores the product back to p[i]).
; Shown as a typed-pointer -> opaque-pointer ("ptr") migration diff:
; "-" lines are the old typed form, "+" lines the replacement.
; NOTE(review): the entry block (which defines %entry and %mul10 — the
; CHECK lines show it as shl %n, 1) and the closing brace lie outside
; this excerpt.
-define void @latch_exit(double* nocapture %p, i64 %n) nounwind {
+define void @latch_exit(ptr nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: @latch_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL10:%.*]] = shl i64 [[N:%.*]], 1
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP16_1:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr double, double* [[P:%.*]], i64 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr double, ptr [[P:%.*]], i64 [[I_013]]
; CHECK-NEXT: [[TMP16:%.*]] = add nuw nsw i64 [[I_013]], 1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load double, double* [[ARRAYIDX7]], align 8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load double, ptr [[ARRAYIDX7]], align 8
; CHECK-NEXT: [[MUL9:%.*]] = fmul double [[TMP8]], [[TMP4]]
-; CHECK-NEXT: store double [[MUL9]], double* [[ARRAYIDX7]], align 8
-; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16]]
+; CHECK-NEXT: store double [[MUL9]], ptr [[ARRAYIDX7]], align 8
+; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16]]
; CHECK-NEXT: [[TMP16_1]] = add i64 [[TMP16]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16_1]]
-; CHECK-NEXT: [[TMP4_1:%.*]] = load double, double* [[ARRAYIDX_1]], align 8
-; CHECK-NEXT: [[TMP8_1:%.*]] = load double, double* [[ARRAYIDX7_1]], align 8
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16_1]]
+; CHECK-NEXT: [[TMP4_1:%.*]] = load double, ptr [[ARRAYIDX_1]], align 8
+; CHECK-NEXT: [[TMP8_1:%.*]] = load double, ptr [[ARRAYIDX7_1]], align 8
; CHECK-NEXT: [[MUL9_1:%.*]] = fmul double [[TMP8_1]], [[TMP4_1]]
-; CHECK-NEXT: store double [[MUL9_1]], double* [[ARRAYIDX7_1]], align 8
+; CHECK-NEXT: store double [[MUL9_1]], ptr [[ARRAYIDX7_1]], align 8
; CHECK-NEXT: [[EXITCOND_1]] = icmp eq i64 [[TMP16_1]], [[MUL10]]
; CHECK-NEXT: br i1 [[EXITCOND_1]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: for.end:
; Pre-unroll loop body. The exit test is in the latch, so unroll-by-2
; can fold both copies into the single block the CHECK lines expect.
for.body:
%i.013 = phi i64 [ %tmp16, %for.body ], [ 0, %entry ]
- %arrayidx7 = getelementptr double, double* %p, i64 %i.013
+ %arrayidx7 = getelementptr double, ptr %p, i64 %i.013
%tmp16 = add i64 %i.013, 1
- %arrayidx = getelementptr double, double* %p, i64 %tmp16
- %tmp4 = load double, double* %arrayidx
- %tmp8 = load double, double* %arrayidx7
+ %arrayidx = getelementptr double, ptr %p, i64 %tmp16
+ %tmp4 = load double, ptr %arrayidx
+ %tmp8 = load double, ptr %arrayidx7
%mul9 = fmul double %tmp8, %tmp4
- store double %mul9, double* %arrayidx7
+ store double %mul9, ptr %arrayidx7
%exitcond = icmp eq i64 %tmp16, %mul10
br i1 %exitcond, label %for.end, label %for.body
; Same as previous test case, but with a non-latch exit. There shouldn't
; be a conditional branch after the first block.
; @non_latch_exit: same computation as @latch_exit (p[i] *= p[i+1]), but
; the exit test lives in %for.body, not in the latch, so after unrolling
; the CHECK lines still expect separate for.body / latch / latch.1 blocks
; rather than one fused block.
; Diff rendering: "-" lines = old typed-pointer form, "+" = new "ptr" form.
; NOTE(review): the entry block (defining %entry/%mul10) and the %latch
; block that branches back to %for.body are outside this excerpt.
-define void @non_latch_exit(double* nocapture %p, i64 %n) nounwind {
+define void @non_latch_exit(ptr nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: @non_latch_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL10:%.*]] = shl i64 [[N:%.*]], 1
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP16_1:%.*]], [[LATCH_1:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr double, double* [[P:%.*]], i64 [[I_013]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr double, ptr [[P:%.*]], i64 [[I_013]]
; CHECK-NEXT: [[TMP16:%.*]] = add nuw nsw i64 [[I_013]], 1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load double, double* [[ARRAYIDX7]], align 8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load double, ptr [[ARRAYIDX7]], align 8
; CHECK-NEXT: [[MUL9:%.*]] = fmul double [[TMP8]], [[TMP4]]
-; CHECK-NEXT: store double [[MUL9]], double* [[ARRAYIDX7]], align 8
+; CHECK-NEXT: store double [[MUL9]], ptr [[ARRAYIDX7]], align 8
; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16]]
+; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16]]
; CHECK-NEXT: [[TMP16_1]] = add i64 [[TMP16]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr double, double* [[P]], i64 [[TMP16_1]]
-; CHECK-NEXT: [[TMP4_1:%.*]] = load double, double* [[ARRAYIDX_1]], align 8
-; CHECK-NEXT: [[TMP8_1:%.*]] = load double, double* [[ARRAYIDX7_1]], align 8
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr double, ptr [[P]], i64 [[TMP16_1]]
+; CHECK-NEXT: [[TMP4_1:%.*]] = load double, ptr [[ARRAYIDX_1]], align 8
+; CHECK-NEXT: [[TMP8_1:%.*]] = load double, ptr [[ARRAYIDX7_1]], align 8
; CHECK-NEXT: [[MUL9_1:%.*]] = fmul double [[TMP8_1]], [[TMP4_1]]
-; CHECK-NEXT: store double [[MUL9_1]], double* [[ARRAYIDX7_1]], align 8
+; CHECK-NEXT: store double [[MUL9_1]], ptr [[ARRAYIDX7_1]], align 8
; CHECK-NEXT: [[EXITCOND_1:%.*]] = icmp eq i64 [[TMP16_1]], [[MUL10]]
; CHECK-NEXT: br i1 [[EXITCOND_1]], label [[FOR_END:%.*]], label [[LATCH_1]]
; CHECK: latch.1:
; Pre-unroll loop body: exits to %for.end from here (the non-latch exit);
; the backedge is taken through %latch, which is not shown in this excerpt.
for.body:
%i.013 = phi i64 [ %tmp16, %latch ], [ 0, %entry ]
- %arrayidx7 = getelementptr double, double* %p, i64 %i.013
+ %arrayidx7 = getelementptr double, ptr %p, i64 %i.013
%tmp16 = add i64 %i.013, 1
- %arrayidx = getelementptr double, double* %p, i64 %tmp16
- %tmp4 = load double, double* %arrayidx
- %tmp8 = load double, double* %arrayidx7
+ %arrayidx = getelementptr double, ptr %p, i64 %tmp16
+ %tmp4 = load double, ptr %arrayidx
+ %tmp8 = load double, ptr %arrayidx7
%mul9 = fmul double %tmp8, %tmp4
- store double %mul9, double* %arrayidx7
+ store double %mul9, ptr %arrayidx7
%exitcond = icmp eq i64 %tmp16, %mul10
br i1 %exitcond, label %for.end, label %latch
; Function Attrs: nounwind uwtable
define void @_Z3fn1v() #0 {
entry:
- %tmp = load i32, i32* @b, align 4
+ %tmp = load i32, ptr @b, align 4
%tobool20 = icmp eq i32 %tmp, 0
br i1 %tobool20, label %for.end6, label %for.body.lr.ph
br label %for.body
for.cond1.for.cond.loopexit_crit_edge: ; preds = %for.inc
- %add.ptr.lcssa = phi i16* [ %add.ptr, %for.inc ]
- %incdec.ptr.lcssa = phi i8* [ %incdec.ptr, %for.inc ]
+ %add.ptr.lcssa = phi ptr [ %add.ptr, %for.inc ]
+ %incdec.ptr.lcssa = phi ptr [ %incdec.ptr, %for.inc ]
br label %for.cond.loopexit
for.cond.loopexit: ; preds = %for.body, %for.cond1.for.cond.loopexit_crit_edge
- %r.1.lcssa = phi i16* [ %add.ptr.lcssa, %for.cond1.for.cond.loopexit_crit_edge ], [ %r.022, %for.body ]
- %a.1.lcssa = phi i8* [ %incdec.ptr.lcssa, %for.cond1.for.cond.loopexit_crit_edge ], [ %a.021, %for.body ]
- %tmp1 = load i32, i32* @b, align 4
+ %r.1.lcssa = phi ptr [ %add.ptr.lcssa, %for.cond1.for.cond.loopexit_crit_edge ], [ %r.022, %for.body ]
+ %a.1.lcssa = phi ptr [ %incdec.ptr.lcssa, %for.cond1.for.cond.loopexit_crit_edge ], [ %a.021, %for.body ]
+ %tmp1 = load i32, ptr @b, align 4
%tobool = icmp eq i32 %tmp1, 0
br i1 %tobool, label %for.cond.for.end6_crit_edge, label %for.body
for.body: ; preds = %for.cond.loopexit, %for.body.lr.ph
- %r.022 = phi i16* [ undef, %for.body.lr.ph ], [ %r.1.lcssa, %for.cond.loopexit ]
- %a.021 = phi i8* [ undef, %for.body.lr.ph ], [ %a.1.lcssa, %for.cond.loopexit ]
- %tmp2 = load i32, i32* @c, align 4
+ %r.022 = phi ptr [ undef, %for.body.lr.ph ], [ %r.1.lcssa, %for.cond.loopexit ]
+ %a.021 = phi ptr [ undef, %for.body.lr.ph ], [ %a.1.lcssa, %for.cond.loopexit ]
+ %tmp2 = load i32, ptr @c, align 4
%tobool215 = icmp eq i32 %tmp2, 0
br i1 %tobool215, label %for.cond.loopexit, label %for.body3.lr.ph
for.body3: ; preds = %for.inc, %for.body3.lr.ph
%dec18.in = phi i32 [ %tmp2, %for.body3.lr.ph ], [ %dec18, %for.inc ]
- %r.117 = phi i16* [ %r.022, %for.body3.lr.ph ], [ %add.ptr, %for.inc ]
- %a.116 = phi i8* [ %a.021, %for.body3.lr.ph ], [ %incdec.ptr, %for.inc ]
+ %r.117 = phi ptr [ %r.022, %for.body3.lr.ph ], [ %add.ptr, %for.inc ]
+ %a.116 = phi ptr [ %a.021, %for.body3.lr.ph ], [ %incdec.ptr, %for.inc ]
%dec18 = add nsw i32 %dec18.in, -1
- %tmp3 = load i8, i8* %a.116, align 1
+ %tmp3 = load i8, ptr %a.116, align 1
%cmp = icmp eq i8 %tmp3, 0
br i1 %cmp, label %if.then, label %for.inc
if.then: ; preds = %for.body3
- %arrayidx = getelementptr inbounds i16, i16* %r.117, i64 1
- store i16 0, i16* %arrayidx, align 2
- store i16 0, i16* %r.117, align 2
- %arrayidx5 = getelementptr inbounds i16, i16* %r.117, i64 2
- store i16 0, i16* %arrayidx5, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %r.117, i64 1
+ store i16 0, ptr %arrayidx, align 2
+ store i16 0, ptr %r.117, align 2
+ %arrayidx5 = getelementptr inbounds i16, ptr %r.117, i64 2
+ store i16 0, ptr %arrayidx5, align 2
br label %for.inc
for.inc: ; preds = %if.then, %for.body3
- %incdec.ptr = getelementptr inbounds i8, i8* %a.116, i64 1
- %add.ptr = getelementptr inbounds i16, i16* %r.117, i64 3
+ %incdec.ptr = getelementptr inbounds i8, ptr %a.116, i64 1
+ %add.ptr = getelementptr inbounds i16, ptr %r.117, i64 3
%tobool2 = icmp eq i32 %dec18, 0
br i1 %tobool2, label %for.cond1.for.cond.loopexit_crit_edge, label %for.body3, !llvm.loop !0
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc18.0.0"
-define void @test1() personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1() personality ptr @__CxxFrameHandler3 {
entry:
br label %for.body
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: store i32 -1, i32* @G, align 4
+; CHECK-NEXT: store i32 -1, ptr @G, align 4
; CHECK-NEXT: [[CMP_1:%.*]] = icmp eq i32 0, [[LIMIT:%.*]]
; CHECK-NEXT: [[ZEXT_1:%.*]] = sext i1 [[CMP_1]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_1]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_1]], ptr @G, align 4
; CHECK-NEXT: [[CMP_2:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_2:%.*]] = sext i1 [[CMP_2]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_2]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_2]], ptr @G, align 4
; CHECK-NEXT: [[CMP_3:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_3:%.*]] = sext i1 [[CMP_3]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_3]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_3]], ptr @G, align 4
; CHECK-NEXT: [[CMP_4:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_4:%.*]] = sext i1 [[CMP_4]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_4]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_4]], ptr @G, align 4
; CHECK-NEXT: [[CMP_5:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_5:%.*]] = sext i1 [[CMP_5]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_5]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_5]], ptr @G, align 4
; CHECK-NEXT: [[CMP_6:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_6:%.*]] = sext i1 [[CMP_6]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_6]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_6]], ptr @G, align 4
; CHECK-NEXT: [[CMP_7:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_7:%.*]] = sext i1 [[CMP_7]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_7]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_7]], ptr @G, align 4
; CHECK-NEXT: [[CMP_8:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_8:%.*]] = sext i1 [[CMP_8]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_8]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_8]], ptr @G, align 4
; CHECK-NEXT: [[CMP_9:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_9:%.*]] = sext i1 [[CMP_9]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_9]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_9]], ptr @G, align 4
; CHECK-NEXT: [[CMP_10:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_10:%.*]] = sext i1 [[CMP_10]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_10]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_10]], ptr @G, align 4
; CHECK-NEXT: [[CMP_11:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_11:%.*]] = sext i1 [[CMP_11]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_11]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_11]], ptr @G, align 4
; CHECK-NEXT: [[CMP_12:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_12:%.*]] = sext i1 [[CMP_12]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_12]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_12]], ptr @G, align 4
; CHECK-NEXT: [[CMP_13:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_13:%.*]] = sext i1 [[CMP_13]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_13]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_13]], ptr @G, align 4
; CHECK-NEXT: [[CMP_14:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_14:%.*]] = sext i1 [[CMP_14]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_14]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_14]], ptr @G, align 4
; CHECK-NEXT: [[CMP_15:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_15:%.*]] = sext i1 [[CMP_15]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_15]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_15]], ptr @G, align 4
; CHECK-NEXT: [[CMP_16:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_16:%.*]] = sext i1 [[CMP_16]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_16]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_16]], ptr @G, align 4
; CHECK-NEXT: [[CMP_17:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_17:%.*]] = sext i1 [[CMP_17]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_17]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_17]], ptr @G, align 4
; CHECK-NEXT: [[CMP_18:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_18:%.*]] = sext i1 [[CMP_18]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_18]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_18]], ptr @G, align 4
; CHECK-NEXT: [[CMP_19:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_19:%.*]] = sext i1 [[CMP_19]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_19]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_19]], ptr @G, align 4
; CHECK-NEXT: [[CMP_20:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_20:%.*]] = sext i1 [[CMP_20]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_20]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_20]], ptr @G, align 4
; CHECK-NEXT: [[CMP_21:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_21:%.*]] = sext i1 [[CMP_21]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_21]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_21]], ptr @G, align 4
; CHECK-NEXT: [[CMP_22:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_22:%.*]] = sext i1 [[CMP_22]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_22]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_22]], ptr @G, align 4
; CHECK-NEXT: [[CMP_23:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_23:%.*]] = sext i1 [[CMP_23]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_23]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_23]], ptr @G, align 4
; CHECK-NEXT: [[CMP_24:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_24:%.*]] = sext i1 [[CMP_24]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_24]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_24]], ptr @G, align 4
; CHECK-NEXT: [[CMP_25:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_25:%.*]] = sext i1 [[CMP_25]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_25]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_25]], ptr @G, align 4
; CHECK-NEXT: [[CMP_26:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_26:%.*]] = sext i1 [[CMP_26]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_26]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_26]], ptr @G, align 4
; CHECK-NEXT: [[CMP_27:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_27:%.*]] = sext i1 [[CMP_27]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_27]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_27]], ptr @G, align 4
; CHECK-NEXT: [[CMP_28:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_28:%.*]] = sext i1 [[CMP_28]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_28]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_28]], ptr @G, align 4
; CHECK-NEXT: [[CMP_29:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_29:%.*]] = sext i1 [[CMP_29]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_29]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_29]], ptr @G, align 4
; CHECK-NEXT: [[CMP_30:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_30:%.*]] = sext i1 [[CMP_30]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_30]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_30]], ptr @G, align 4
; CHECK-NEXT: [[CMP_31:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_31:%.*]] = sext i1 [[CMP_31]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_31]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_31]], ptr @G, align 4
; CHECK-NEXT: [[CMP_32:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_32:%.*]] = sext i1 [[CMP_32]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_32]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_32]], ptr @G, align 4
; CHECK-NEXT: [[CMP_33:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_33:%.*]] = sext i1 [[CMP_33]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_33]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_33]], ptr @G, align 4
; CHECK-NEXT: [[CMP_34:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_34:%.*]] = sext i1 [[CMP_34]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_34]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_34]], ptr @G, align 4
; CHECK-NEXT: [[CMP_35:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_35:%.*]] = sext i1 [[CMP_35]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_35]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_35]], ptr @G, align 4
; CHECK-NEXT: [[CMP_36:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_36:%.*]] = sext i1 [[CMP_36]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_36]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_36]], ptr @G, align 4
; CHECK-NEXT: [[CMP_37:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_37:%.*]] = sext i1 [[CMP_37]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_37]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_37]], ptr @G, align 4
; CHECK-NEXT: [[CMP_38:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_38:%.*]] = sext i1 [[CMP_38]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_38]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_38]], ptr @G, align 4
; CHECK-NEXT: [[CMP_39:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_39:%.*]] = sext i1 [[CMP_39]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_39]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_39]], ptr @G, align 4
; CHECK-NEXT: [[CMP_40:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_40:%.*]] = sext i1 [[CMP_40]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_40]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_40]], ptr @G, align 4
; CHECK-NEXT: [[CMP_41:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_41:%.*]] = sext i1 [[CMP_41]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_41]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_41]], ptr @G, align 4
; CHECK-NEXT: [[CMP_42:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_42:%.*]] = sext i1 [[CMP_42]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_42]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_42]], ptr @G, align 4
; CHECK-NEXT: [[CMP_43:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_43:%.*]] = sext i1 [[CMP_43]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_43]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_43]], ptr @G, align 4
; CHECK-NEXT: [[CMP_44:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_44:%.*]] = sext i1 [[CMP_44]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_44]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_44]], ptr @G, align 4
; CHECK-NEXT: [[CMP_45:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_45:%.*]] = sext i1 [[CMP_45]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_45]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_45]], ptr @G, align 4
; CHECK-NEXT: [[CMP_46:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_46:%.*]] = sext i1 [[CMP_46]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_46]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_46]], ptr @G, align 4
; CHECK-NEXT: [[CMP_47:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_47:%.*]] = sext i1 [[CMP_47]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_47]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_47]], ptr @G, align 4
; CHECK-NEXT: [[CMP_48:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_48:%.*]] = sext i1 [[CMP_48]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_48]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_48]], ptr @G, align 4
; CHECK-NEXT: [[CMP_49:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_49:%.*]] = sext i1 [[CMP_49]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_49]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_49]], ptr @G, align 4
; CHECK-NEXT: [[CMP_50:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_50:%.*]] = sext i1 [[CMP_50]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_50]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_50]], ptr @G, align 4
; CHECK-NEXT: [[CMP_51:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_51:%.*]] = sext i1 [[CMP_51]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_51]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_51]], ptr @G, align 4
; CHECK-NEXT: [[CMP_52:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_52:%.*]] = sext i1 [[CMP_52]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_52]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_52]], ptr @G, align 4
; CHECK-NEXT: [[CMP_53:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_53:%.*]] = sext i1 [[CMP_53]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_53]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_53]], ptr @G, align 4
; CHECK-NEXT: [[CMP_54:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_54:%.*]] = sext i1 [[CMP_54]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_54]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_54]], ptr @G, align 4
; CHECK-NEXT: [[CMP_55:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_55:%.*]] = sext i1 [[CMP_55]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_55]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_55]], ptr @G, align 4
; CHECK-NEXT: [[CMP_56:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_56:%.*]] = sext i1 [[CMP_56]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_56]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_56]], ptr @G, align 4
; CHECK-NEXT: [[CMP_57:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_57:%.*]] = sext i1 [[CMP_57]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_57]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_57]], ptr @G, align 4
; CHECK-NEXT: [[CMP_58:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_58:%.*]] = sext i1 [[CMP_58]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_58]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_58]], ptr @G, align 4
; CHECK-NEXT: [[CMP_59:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_59:%.*]] = sext i1 [[CMP_59]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_59]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_59]], ptr @G, align 4
; CHECK-NEXT: [[CMP_60:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_60:%.*]] = sext i1 [[CMP_60]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_60]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_60]], ptr @G, align 4
; CHECK-NEXT: [[CMP_61:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_61:%.*]] = sext i1 [[CMP_61]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_61]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_61]], ptr @G, align 4
; CHECK-NEXT: [[CMP_62:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_62:%.*]] = sext i1 [[CMP_62]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_62]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_62]], ptr @G, align 4
; CHECK-NEXT: [[CMP_63:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_63:%.*]] = sext i1 [[CMP_63]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_63]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_63]], ptr @G, align 4
; CHECK-NEXT: [[CMP_64:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_64:%.*]] = sext i1 [[CMP_64]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_64]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_64]], ptr @G, align 4
; CHECK-NEXT: [[CMP_65:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_65:%.*]] = sext i1 [[CMP_65]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_65]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_65]], ptr @G, align 4
; CHECK-NEXT: [[CMP_66:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_66:%.*]] = sext i1 [[CMP_66]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_66]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_66]], ptr @G, align 4
; CHECK-NEXT: [[CMP_67:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_67:%.*]] = sext i1 [[CMP_67]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_67]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_67]], ptr @G, align 4
; CHECK-NEXT: [[CMP_68:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_68:%.*]] = sext i1 [[CMP_68]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_68]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_68]], ptr @G, align 4
; CHECK-NEXT: [[CMP_69:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_69:%.*]] = sext i1 [[CMP_69]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_69]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_69]], ptr @G, align 4
; CHECK-NEXT: [[CMP_70:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_70:%.*]] = sext i1 [[CMP_70]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_70]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_70]], ptr @G, align 4
; CHECK-NEXT: [[CMP_71:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_71:%.*]] = sext i1 [[CMP_71]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_71]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_71]], ptr @G, align 4
; CHECK-NEXT: [[CMP_72:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_72:%.*]] = sext i1 [[CMP_72]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_72]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_72]], ptr @G, align 4
; CHECK-NEXT: [[CMP_73:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_73:%.*]] = sext i1 [[CMP_73]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_73]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_73]], ptr @G, align 4
; CHECK-NEXT: [[CMP_74:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_74:%.*]] = sext i1 [[CMP_74]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_74]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_74]], ptr @G, align 4
; CHECK-NEXT: [[CMP_75:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_75:%.*]] = sext i1 [[CMP_75]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_75]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_75]], ptr @G, align 4
; CHECK-NEXT: [[CMP_76:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_76:%.*]] = sext i1 [[CMP_76]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_76]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_76]], ptr @G, align 4
; CHECK-NEXT: [[CMP_77:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_77:%.*]] = sext i1 [[CMP_77]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_77]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_77]], ptr @G, align 4
; CHECK-NEXT: [[CMP_78:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_78:%.*]] = sext i1 [[CMP_78]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_78]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_78]], ptr @G, align 4
; CHECK-NEXT: [[CMP_79:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_79:%.*]] = sext i1 [[CMP_79]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_79]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_79]], ptr @G, align 4
; CHECK-NEXT: [[CMP_80:%.*]] = icmp eq i32 0, [[LIMIT]]
; CHECK-NEXT: [[ZEXT_80:%.*]] = sext i1 [[CMP_80]] to i32
-; CHECK-NEXT: store i32 [[ZEXT_80]], i32* @G, align 4
+; CHECK-NEXT: store i32 [[ZEXT_80]], ptr @G, align 4
; CHECK-NEXT: ret i32 [[ZEXT_80]]
;
entry:
%sub = sub i32 %limit, %phi
%cmp = icmp eq i32 %sub, %limit
%zext = sext i1 %cmp to i32
- store i32 %zext, i32* @G
+ store i32 %zext, ptr @G
%iv.next = add i32 %iv, 1
%loop.cond = icmp ne i32 %iv, 80
br i1 %loop.cond, label %loop, label %done
; CHECK-LABEL: @foo(
; CHECK: llvm.loop.unroll.disable
; @foo: trip-count-64 loop incrementing a[i] in place (a[i] += 1 for
; i = 0..63). The CHECK lines just above expect llvm.loop.unroll.disable
; on the output, so the unroller must leave this loop alone.
; Diff rendering: "-" lines = old typed-pointer form, "+" = new "ptr" form.
; NOTE(review): %for.end and the closing brace — and the !llvm.loop
; metadata presumably attached to the backedge branch — are outside this
; excerpt; confirm against the full test.
-define void @foo(i32* nocapture %a) {
+define void @foo(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body
; Loop with multiple exiting blocks, where the header exits but not the latch,
; e.g. because it has not been rotated.
-define i16 @full_unroll_multiple_exiting_blocks(i16* %A, i16 %x, i16 %y) {
+define i16 @full_unroll_multiple_exiting_blocks(ptr %A, i16 %x, i16 %y) {
; CHECK-LABEL: @full_unroll_multiple_exiting_blocks(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
-; CHECK-NEXT: [[LV:%.*]] = load i16, i16* [[A:%.*]], align 2
+; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[A:%.*]], align 2
; CHECK-NEXT: [[RES_NEXT:%.*]] = add i16 123, [[LV]]
; CHECK-NEXT: br label [[EXITING_1:%.*]]
; CHECK: exiting.1:
; CHECK-NEXT: [[EC_2:%.*]] = icmp eq i16 [[LV]], [[Y:%.*]]
; CHECK-NEXT: br i1 [[EC_2]], label [[EXIT]], label [[LATCH:%.*]]
; CHECK: latch:
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 1
-; CHECK-NEXT: [[LV_1:%.*]] = load i16, i16* [[PTR_1]], align 2
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 1
+; CHECK-NEXT: [[LV_1:%.*]] = load i16, ptr [[PTR_1]], align 2
; CHECK-NEXT: [[RES_NEXT_1:%.*]] = add i16 [[RES_NEXT]], [[LV_1]]
; CHECK-NEXT: br label [[EXITING_1_1:%.*]]
; CHECK: exiting.1.1:
; CHECK-NEXT: [[EC_2_1:%.*]] = icmp eq i16 [[LV_1]], [[Y]]
; CHECK-NEXT: br i1 [[EC_2_1]], label [[EXIT]], label [[LATCH_1:%.*]]
; CHECK: latch.1:
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 2
-; CHECK-NEXT: [[LV_2:%.*]] = load i16, i16* [[PTR_2]], align 2
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 2
+; CHECK-NEXT: [[LV_2:%.*]] = load i16, ptr [[PTR_2]], align 2
; CHECK-NEXT: [[RES_NEXT_2:%.*]] = add i16 [[RES_NEXT_1]], [[LV_2]]
; CHECK-NEXT: br label [[EXITING_1_2:%.*]]
; CHECK: exiting.1.2:
; CHECK-NEXT: [[EC_2_2:%.*]] = icmp eq i16 [[LV_2]], [[Y]]
; CHECK-NEXT: br i1 [[EC_2_2]], label [[EXIT]], label [[LATCH_2:%.*]]
; CHECK: latch.2:
-; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 3
-; CHECK-NEXT: [[LV_3:%.*]] = load i16, i16* [[PTR_3]], align 2
+; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 3
+; CHECK-NEXT: [[LV_3:%.*]] = load i16, ptr [[PTR_3]], align 2
; CHECK-NEXT: [[RES_NEXT_3:%.*]] = add i16 [[RES_NEXT_2]], [[LV_3]]
; CHECK-NEXT: br i1 false, label [[EXITING_1_3:%.*]], label [[EXIT]]
; CHECK: exiting.1.3:
header:
%res = phi i16 [ 123, %entry ], [ %res.next, %latch ]
%i.0 = phi i64 [ 0, %entry ], [ %inc9, %latch ]
- %ptr = getelementptr inbounds i16, i16* %A, i64 %i.0
- %lv = load i16, i16* %ptr
+ %ptr = getelementptr inbounds i16, ptr %A, i64 %i.0
+ %lv = load i16, ptr %ptr
%res.next = add i16 %res, %lv
%cmp = icmp ult i64 %i.0, 3
br i1 %cmp, label %exiting.1, label %exit
; have its loop-carried value (the load in for.cond) replaced accordingly
; after unrolling the loop.
; @full_unroll: loop with constant trip count (exit when %i.0 >= 3 fails
; the ult compare), so the unroller fully unrolls it; the CHECK lines
; expect the cloned for.cond.cleanup3.N blocks and a final "br i1 false".
; Diff rendering: "-" lines = old typed-pointer form, "+" = new "ptr" form.
; NOTE(review): this excerpt is garbled — %entry, %for.cond.cleanup3
; (which defines %.lcssa and %inc9) and %for.cond.cleanup are referenced
; but not shown, and the stray "br label %for.cond" before the closing
; brace has no block label; reconcile against the original test file.
-define i16 @full_unroll(i16* %A) {
+define i16 @full_unroll(ptr %A) {
; CHECK-LABEL: @full_unroll(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond.cleanup3:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_1:%.*]]
; CHECK: for.cond.cleanup3.1:
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 2
-; CHECK-NEXT: [[TMP2_2]] = load i16, i16* [[PTR_2]], align 2
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 2
+; CHECK-NEXT: [[TMP2_2]] = load i16, ptr [[PTR_2]], align 2
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_2]]
; CHECK: for.cond.cleanup3.2:
; CHECK-NEXT: br i1 false, label [[FOR_COND_CLEANUP3_3:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; Loop header: carries the last-loaded value (%.lcssa) and induction
; variable across iterations, loads A[i], and exits once i reaches 3.
for.cond: ; preds = %for.cond.cleanup3, %entry
%.lcssa10 = phi i16 [ 123, %entry ], [ %.lcssa, %for.cond.cleanup3 ]
%i.0 = phi i64 [ 0, %entry ], [ %inc9, %for.cond.cleanup3 ]
- %ptr = getelementptr inbounds i16, i16* %A, i64 %i.0
- %tmp2 = load i16, i16* %ptr
+ %ptr = getelementptr inbounds i16, ptr %A, i64 %i.0
+ %tmp2 = load i16, ptr %ptr
%cmp = icmp ult i64 %i.0, 3
br i1 %cmp, label %for.cond.cleanup3, label %for.cond.cleanup
br label %for.cond
}
; @partial_unroll: same shape as @full_unroll but with trip count 200,
; large enough that the unroller only partially unrolls; the CHECK lines
; expect cloned for.cond.cleanup3.N blocks with chained INC9 increments.
; Diff rendering: "-" lines = old typed-pointer form, "+" = new "ptr" form.
; NOTE(review): %entry, %for.cond.cleanup3 (defining %.lcssa/%inc9),
; %for.cond.cleanup and the closing brace are outside this excerpt.
-define i16 @partial_unroll(i16* %A) {
+define i16 @partial_unroll(ptr %A) {
; CHECK-LABEL: @partial_unroll(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK-NEXT: ret i16 0
; CHECK: for.cond.cleanup3:
; CHECK-NEXT: [[INC9:%.*]] = add nuw nsw i64 [[I_0]], 1
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[INC9]]
-; CHECK-NEXT: [[TMP2_1]] = load i16, i16* [[PTR_1]], align 2
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[INC9]]
+; CHECK-NEXT: [[TMP2_1]] = load i16, ptr [[PTR_1]], align 2
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_1]]
; CHECK: for.cond.cleanup3.1:
; CHECK-NEXT: [[INC9_1:%.*]] = add nuw nsw i64 [[INC9]], 1
; Loop header: loads A[i] each iteration and exits once i reaches 200.
for.cond: ; preds = %for.cond.cleanup3, %entry
%.lcssa10 = phi i16 [ 123, %entry ], [ %.lcssa, %for.cond.cleanup3 ]
%i.0 = phi i64 [ 0, %entry ], [ %inc9, %for.cond.cleanup3 ]
- %ptr = getelementptr inbounds i16, i16* %A, i64 %i.0
- %tmp2 = load i16, i16* %ptr
+ %ptr = getelementptr inbounds i16, ptr %A, i64 %i.0
+ %tmp2 = load i16, ptr %ptr
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.cond.cleanup3, label %for.cond.cleanup
; CHECK: %mul.2 = mul
; CHECK: %mul.3 = mul
; CHECK: loop.epil:
-define i32 @bar_prof(i32* noalias nocapture readonly %src, i64 %c) !prof !1 {
+define i32 @bar_prof(ptr noalias nocapture readonly %src, i64 %c) !prof !1 {
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
- %const_array_element = load i32, i32* %array_const_idx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
+ %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
+ %const_array_element = load i32, ptr %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
; CHECK-LABEL: @bar_prof_flat
; CHECK-NOT: loop.epil
-define i32 @bar_prof_flat(i32* noalias nocapture readonly %src, i64 %c) !prof !1 {
+define i32 @bar_prof_flat(ptr noalias nocapture readonly %src, i64 %c) !prof !1 {
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32, i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
- %const_array_element = load i32, i32* %array_const_idx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
+ %src_element = load i32, ptr %arrayidx, align 4
+ %array_const_idx = getelementptr inbounds [9 x i32], ptr @known_constant, i64 0, i64 %iv
+ %const_array_element = load i32, ptr %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
; CHECK-NEXT: add
; CHECK-NEXT: icmp
; CHECK-NEXT: br
-define void @foo(i32* nocapture %a) {
+define void @foo(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1024
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [24 x i32], [24 x i32]* @tab, i32 0, i32 %i.05
- store i32 %i.05, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [24 x i32], ptr @tab, i32 0, i32 %i.05
+ store i32 %i.05, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 24
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [24 x i32], [24 x i32]* @tab, i32 0, i32 %i.05
- store i32 %i.05, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [24 x i32], ptr @tab, i32 0, i32 %i.05
+ store i32 %i.05, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 24
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [24 x i32], [24 x i32]* @tab, i32 0, i32 %i.05
- store i32 %i.05, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [24 x i32], ptr @tab, i32 0, i32 %i.05
+ store i32 %i.05, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 24
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [24 x i32], [24 x i32]* @tab, i32 0, i32 %i.05
- store i32 %i.05, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [24 x i32], ptr @tab, i32 0, i32 %i.05
+ store i32 %i.05, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 24
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15
;
; CHECK-LABEL: @unroll_count_4(
; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_1:.*]]
-define void @unroll_count_4(i32* nocapture %a) {
+define void @unroll_count_4(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
;
; CHECK-LABEL: @unroll_full(
; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_2:.*]]
-define void @unroll_full(i32* nocapture %a, i32 %b) {
+define void @unroll_full(ptr nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !5
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
;
; CHECK-LABEL: @unroll_disable(
; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_3:.*]]
-define void @unroll_disable(i32* nocapture %a) {
+define void @unroll_disable(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !7
; CHECK: store i32
; CHECK: store i32
; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_5:.*]]
-define void @shared_metadata(i32* nocapture %List) #0 {
+define void @shared_metadata(ptr nocapture %List) #0 {
entry:
br label %for.body3
for.body3: ; preds = %for.body3, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds i32, i32* %List, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %List, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add4 = add nsw i32 %0, 10
- store i32 %add4, i32* %arrayidx, align 4
+ store i32 %add4, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.body3.1.preheader, label %for.body3, !llvm.loop !9
for.body3.1: ; preds = %for.body3.1.preheader, %for.body3.1
%indvars.iv.1 = phi i64 [ %1, %for.body3.1 ], [ 0, %for.body3.1.preheader ]
%1 = add nsw i64 %indvars.iv.1, 1
- %arrayidx.1 = getelementptr inbounds i32, i32* %List, i64 %1
- %2 = load i32, i32* %arrayidx.1, align 4
+ %arrayidx.1 = getelementptr inbounds i32, ptr %List, i64 %1
+ %2 = load i32, ptr %arrayidx.1, align 4
%add4.1 = add nsw i32 %2, 10
- store i32 %add4.1, i32* %arrayidx.1, align 4
+ store i32 %add4.1, ptr %arrayidx.1, align 4
%exitcond.1 = icmp eq i64 %1, 4
br i1 %exitcond.1, label %for.inc5.1, label %for.body3.1, !llvm.loop !9
;
; CHECK-LABEL: @loop4(
; CHECK-NOT: br i1
-define void @loop4(i32* nocapture %a) {
+define void @loop4(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
-define void @loop4_with_disable(i32* nocapture %a) {
+define void @loop4_with_disable(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
-define void @loop64(i32* nocapture %a) {
+define void @loop64(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body
;
; CHECK-LABEL: @loop64_with_full(
; CHECK-NOT: br i1
-define void @loop64_with_full(i32* nocapture %a) {
+define void @loop64_with_full(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
-define void @loop64_with_count4(i32* nocapture %a) {
+define void @loop64_with_count4(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !5
; CHECK-LABEL: @runtime_loop_with_full(
; CHECK: store i32
; CHECK-NOT: store i32
-define void @runtime_loop_with_full(i32* nocapture %a, i32 %b) {
+define void @runtime_loop_with_full(ptr nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
; CHECK-NOT: store
; REM: br i1
; NOREM-NOT: br i1
-define void @runtime_loop_with_count4(i32* nocapture %a, i32 %b) {
+define void @runtime_loop_with_count4(ptr nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !9
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
-define void @unroll_1(i32* nocapture %a, i32 %b) {
+define void @unroll_1(ptr nocapture %a, i32 %b) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !10
;
; CHECK-LABEL: @loop64_with_enable(
; CHECK-NOT: br i1
-define void @loop64_with_enable(i32* nocapture %a) {
+define void @loop64_with_enable(ptr nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !12
; CHECK-NOT: store
; REM: br i1
; NOREM-NOT: br i1
-define void @runtime_loop_with_enable(i32* nocapture %a, i32 %b) {
+define void @runtime_loop_with_enable(ptr nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
; NOREM-NOT: store
; CHECK-NOT: store
; REM: br i1
-define void @runtime_loop_with_count3(i32* nocapture %a, i32 %b) {
+define void @runtime_loop_with_count3(ptr nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !16
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
%struct.spam = type { double, double, double, double, double, double, double }
-define void @test2(i32* %arg, i64* %out) {
+define void @test2(ptr %arg, ptr %out) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_HEADER:%.*]]
; CHECK: for.header:
-; CHECK-NEXT: store i32 0, i32* [[ARG:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARG:%.*]], align 4
; CHECK-NEXT: br label [[FOR_LATCH:%.*]]
; CHECK: for.latch:
-; CHECK-NEXT: store volatile i64 0, i64* [[OUT:%.*]], align 4
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 1
-; CHECK-NEXT: store i32 0, i32* [[PTR_1]], align 4
+; CHECK-NEXT: store volatile i64 0, ptr [[OUT:%.*]], align 4
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 1
+; CHECK-NEXT: store i32 0, ptr [[PTR_1]], align 4
; CHECK-NEXT: br label [[FOR_LATCH_1:%.*]]
; CHECK: for.latch.1:
-; CHECK-NEXT: store volatile i64 1, i64* [[OUT]], align 4
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 2
-; CHECK-NEXT: store i32 0, i32* [[PTR_2]], align 4
+; CHECK-NEXT: store volatile i64 1, ptr [[OUT]], align 4
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 2
+; CHECK-NEXT: store i32 0, ptr [[PTR_2]], align 4
; CHECK-NEXT: br label [[FOR_LATCH_2:%.*]]
; CHECK: for.latch.2:
-; CHECK-NEXT: store volatile i64 2, i64* [[OUT]], align 4
-; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 3
-; CHECK-NEXT: store i32 0, i32* [[PTR_3]], align 4
+; CHECK-NEXT: store volatile i64 2, ptr [[OUT]], align 4
+; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 3
+; CHECK-NEXT: store i32 0, ptr [[PTR_3]], align 4
; CHECK-NEXT: br i1 true, label [[IF_END_LOOPEXIT:%.*]], label [[FOR_LATCH_3:%.*]]
; CHECK: for.latch.3:
-; CHECK-NEXT: store volatile i64 3, i64* [[OUT]], align 4
+; CHECK-NEXT: store volatile i64 3, ptr [[OUT]], align 4
; CHECK-NEXT: unreachable
; CHECK: if.end.loopexit:
; CHECK-NEXT: ret void
for.header: ; preds = %for.latch, %entry
%indvars.iv800 = phi i64 [ 0, %entry ], [ %indvars.iv.next801, %for.latch ]
- %ptr = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv800
- store i32 0, i32* %ptr, align 4
+ %ptr = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv800
+ store i32 0, ptr %ptr, align 4
%indvars.iv.next801 = add nuw nsw i64 %indvars.iv800, 1
%exitcond802 = icmp eq i64 %indvars.iv.next801, 4
br i1 %exitcond802, label %if.end.loopexit, label %for.latch
for.latch: ; preds = %for.header
- store volatile i64 %indvars.iv800, i64* %out
+ store volatile i64 %indvars.iv800, ptr %out
br label %for.header
if.end.loopexit: ; preds = %for.header
ret void
}
-define double @test_with_lcssa(double %arg1, double* %arg2) {
+define double @test_with_lcssa(double %arg1, ptr %arg2) {
; CHECK-LABEL: @test_with_lcssa(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK-NEXT: [[RES:%.*]] = fsub double [[ARG1:%.*]], 3.000000e+00
; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], i64 1
-; CHECK-NEXT: [[LV:%.*]] = load double, double* [[PTR]], align 8
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds double, ptr [[ARG2:%.*]], i64 1
+; CHECK-NEXT: [[LV:%.*]] = load double, ptr [[PTR]], align 8
; CHECK-NEXT: [[RES_1:%.*]] = fsub double [[LV]], [[RES]]
; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[LOOP_LATCH_1:%.*]]
; CHECK: loop.latch.1:
br i1 %cond, label %loop.exit, label %loop.latch
loop.latch: ; preds = %bb366
- %ptr = getelementptr inbounds double, double* %arg2, i64 %iv.next
- %lv = load double, double* %ptr, align 8
+ %ptr = getelementptr inbounds double, ptr %arg2, i64 %iv.next
+ %lv = load double, ptr %ptr, align 8
br label %loop.header
loop.exit: ; preds = %bb366
}
; We unroll the outer loop and need to preserve LI for the inner loop.
-define void @test_with_nested_loop(i32* %arg) {
+define void @test_with_nested_loop(ptr %arg) {
; CHECK-LABEL: @test_with_nested_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK-NEXT: br label [[INNER_BODY:%.*]]
; CHECK: inner.body:
; CHECK-NEXT: [[J_IV:%.*]] = phi i64 [ [[J_IV_NEXT:%.*]], [[INNER_BODY]] ], [ 0, [[INNER_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[J_IV]]
-; CHECK-NEXT: store i32 0, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[J_IV]]
+; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[J_IV_NEXT]] = add nuw nsw i64 [[J_IV]], 1
; CHECK-NEXT: [[INNER_COND:%.*]] = icmp eq i64 [[J_IV_NEXT]], 40000
; CHECK-NEXT: br i1 [[INNER_COND]], label [[OUTER_LATCH:%.*]], label [[INNER_BODY]]
; CHECK: inner.body.1:
; CHECK-NEXT: [[J_IV_1:%.*]] = phi i64 [ [[J_IV_NEXT_1:%.*]], [[INNER_BODY_1]] ], [ 0, [[INNER_BODY_PREHEADER_1]] ]
; CHECK-NEXT: [[IDX_1:%.*]] = add i64 1, [[J_IV_1]]
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[IDX_1]]
-; CHECK-NEXT: store i32 0, i32* [[PTR_1]], align 4
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[IDX_1]]
+; CHECK-NEXT: store i32 0, ptr [[PTR_1]], align 4
; CHECK-NEXT: [[J_IV_NEXT_1]] = add nuw nsw i64 [[J_IV_1]], 1
; CHECK-NEXT: [[INNER_COND_1:%.*]] = icmp eq i64 [[J_IV_NEXT_1]], 40000
; CHECK-NEXT: br i1 [[INNER_COND_1]], label [[OUTER_LATCH_1:%.*]], label [[INNER_BODY_1]]
; CHECK: inner.body.2:
; CHECK-NEXT: [[J_IV_2:%.*]] = phi i64 [ [[J_IV_NEXT_2:%.*]], [[INNER_BODY_2]] ], [ 0, [[INNER_BODY_PREHEADER_2]] ]
; CHECK-NEXT: [[IDX_2:%.*]] = add i64 2, [[J_IV_2]]
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[IDX_2]]
-; CHECK-NEXT: store i32 0, i32* [[PTR_2]], align 4
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[IDX_2]]
+; CHECK-NEXT: store i32 0, ptr [[PTR_2]], align 4
; CHECK-NEXT: [[J_IV_NEXT_2]] = add nuw nsw i64 [[J_IV_2]], 1
; CHECK-NEXT: [[INNER_COND_2:%.*]] = icmp eq i64 [[J_IV_NEXT_2]], 40000
; CHECK-NEXT: br i1 [[INNER_COND_2]], label [[OUTER_LATCH_2:%.*]], label [[INNER_BODY_2]]
inner.body:
%j.iv = phi i64 [ 0, %outer.header ], [ %j.iv.next, %inner.body ]
%idx = add i64 %outer.iv, %j.iv
- %ptr = getelementptr inbounds i32, i32* %arg, i64 %idx
- store i32 0, i32* %ptr, align 4
+ %ptr = getelementptr inbounds i32, ptr %arg, i64 %idx
+ store i32 0, ptr %ptr, align 4
%j.iv.next = add nuw nsw i64 %j.iv, 1
%inner.cond = icmp eq i64 %j.iv.next, 40000
br i1 %inner.cond, label %outer.latch, label %inner.body
}
; We unroll the inner loop and need to preserve LI for the outer loop.
-define void @test_with_nested_loop_unroll_inner(i32* %arg) {
+define void @test_with_nested_loop_unroll_inner(ptr %arg) {
; CHECK-LABEL: @test_with_nested_loop_unroll_inner(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: inner.body.preheader:
; CHECK-NEXT: br label [[INNER_BODY]]
; CHECK: inner.body:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[OUTER_IV]]
-; CHECK-NEXT: store i32 0, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[OUTER_IV]]
+; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[IDX_1:%.*]] = add i64 [[OUTER_IV]], 1
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[IDX_1]]
-; CHECK-NEXT: store i32 0, i32* [[PTR_1]], align 4
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[IDX_1]]
+; CHECK-NEXT: store i32 0, ptr [[PTR_1]], align 4
; CHECK-NEXT: br label [[OUTER_HEADER]]
; CHECK: exit:
; CHECK-NEXT: ret void
inner.body:
%j.iv = phi i64 [ 0, %outer.header ], [ %j.iv.next, %inner.body ]
%idx = add i64 %outer.iv, %j.iv
- %ptr = getelementptr inbounds i32, i32* %arg, i64 %idx
- store i32 0, i32* %ptr, align 4
+ %ptr = getelementptr inbounds i32, ptr %arg, i64 %idx
+ store i32 0, ptr %ptr, align 4
%j.iv.next = add nuw nsw i64 %j.iv, 1
%inner.cond = icmp eq i64 %j.iv.next, 2
br i1 %inner.cond, label %outer.latch, label %inner.body