%T = type { i32, i32, i32, i32 }
-define internal i32 @test(%T* %p) {
+define internal i32 @test(ptr %p) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@test
-; CHECK-SAME: (%T* nocapture nofree readonly [[P:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 3
-; CHECK-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], %T* [[P]], i64 0, i32 2
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[A_GEP]], align 4
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[B_GEP]], align 4
+; CHECK-SAME: (ptr nocapture nofree readonly [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], ptr [[P]], i64 0, i32 3
+; CHECK-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], ptr [[P]], i64 0, i32 2
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[A_GEP]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[B_GEP]], align 4
; CHECK-NEXT: [[V:%.*]] = add i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[V]]
;
- %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
- %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
- %a = load i32, i32* %a.gep
- %b = load i32, i32* %b.gep
+ %a.gep = getelementptr %T, ptr %p, i64 0, i32 3
+ %b.gep = getelementptr %T, ptr %p, i64 0, i32 2
+ %a = load i32, ptr %a.gep
+ %b = load i32, ptr %b.gep
%v = add i32 %a, %b
ret i32 %v
}
-define i32 @caller(%T* %p) {
+define i32 @caller(ptr %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; TUNIT-LABEL: define {{[^@]+}}@caller
-; TUNIT-SAME: (%T* nocapture nofree readonly [[P:%.*]]) #[[ATTR0]] {
-; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @test(%T* nocapture nofree readonly [[P]]) #[[ATTR4:[0-9]+]]
+; TUNIT-SAME: (ptr nocapture nofree readonly [[P:%.*]]) #[[ATTR0]] {
+; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @test(ptr nocapture nofree readonly [[P]]) #[[ATTR4:[0-9]+]]
; TUNIT-NEXT: ret i32 [[V]]
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@caller
-; CGSCC-SAME: (%T* nocapture nofree readonly [[P:%.*]]) #[[ATTR1:[0-9]+]] {
-; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @test(%T* nocapture nofree readonly [[P]]) #[[ATTR5:[0-9]+]]
+; CGSCC-SAME: (ptr nocapture nofree readonly [[P:%.*]]) #[[ATTR1:[0-9]+]] {
+; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @test(ptr nocapture nofree readonly [[P]]) #[[ATTR5:[0-9]+]]
; CGSCC-NEXT: ret i32 [[V]]
;
- %v = musttail call i32 @test(%T* %p)
+ %v = musttail call i32 @test(ptr %p)
; Don't promote arguments of musttail caller
-define i32 @foo(%T* %p, i32 %v) {
+define i32 @foo(ptr %p, i32 %v) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@foo
-; TUNIT-SAME: (%T* nocapture nofree readnone [[P:%.*]], i32 [[V:%.*]]) #[[ATTR1:[0-9]+]] {
+; TUNIT-SAME: (ptr nocapture nofree readnone [[P:%.*]], i32 [[V:%.*]]) #[[ATTR1:[0-9]+]] {
; TUNIT-NEXT: ret i32 0
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@foo
-; CGSCC-SAME: (%T* nocapture nofree readnone [[P:%.*]], i32 [[V:%.*]]) #[[ATTR2:[0-9]+]] {
+; CGSCC-SAME: (ptr nocapture nofree readnone [[P:%.*]], i32 [[V:%.*]]) #[[ATTR2:[0-9]+]] {
; CGSCC-NEXT: ret i32 0
;
ret i32 0
}
-define internal i32 @test2(%T* %p, i32 %p2) {
+define internal i32 @test2(ptr %p, i32 %p2) {
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@test2
-; CGSCC-SAME: (%T* nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR1]] {
-; CGSCC-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 3
-; CGSCC-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], %T* [[P]], i64 0, i32 2
-; CGSCC-NEXT: [[A:%.*]] = load i32, i32* [[A_GEP]], align 4
-; CGSCC-NEXT: [[B:%.*]] = load i32, i32* [[B_GEP]], align 4
+; CGSCC-SAME: (ptr nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR1]] {
+; CGSCC-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], ptr [[P]], i64 0, i32 3
+; CGSCC-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], ptr [[P]], i64 0, i32 2
+; CGSCC-NEXT: [[A:%.*]] = load i32, ptr [[A_GEP]], align 4
+; CGSCC-NEXT: [[B:%.*]] = load i32, ptr [[B_GEP]], align 4
; CGSCC-NEXT: [[V:%.*]] = add i32 [[A]], [[B]]
-; CGSCC-NEXT: [[CA:%.*]] = musttail call noundef i32 @foo(%T* undef, i32 [[V]]) #[[ATTR5]]
+; CGSCC-NEXT: [[CA:%.*]] = musttail call noundef i32 @foo(ptr undef, i32 [[V]]) #[[ATTR5]]
; CGSCC-NEXT: ret i32 [[CA]]
;
- %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
- %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
- %a = load i32, i32* %a.gep
- %b = load i32, i32* %b.gep
+ %a.gep = getelementptr %T, ptr %p, i64 0, i32 3
+ %b.gep = getelementptr %T, ptr %p, i64 0, i32 2
+ %a = load i32, ptr %a.gep
+ %b = load i32, ptr %b.gep
%v = add i32 %a, %b
- %ca = musttail call i32 @foo(%T* undef, i32 %v)
+ %ca = musttail call i32 @foo(ptr undef, i32 %v)
ret i32 %ca
}
-define i32 @caller2(%T* %g) {
+define i32 @caller2(ptr %g) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@caller2
-; TUNIT-SAME: (%T* nocapture nofree readnone [[G:%.*]]) #[[ATTR1]] {
+; TUNIT-SAME: (ptr nocapture nofree readnone [[G:%.*]]) #[[ATTR1]] {
; TUNIT-NEXT: ret i32 0
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@caller2
-; CGSCC-SAME: (%T* nocapture nofree readonly align 4 [[G:%.*]]) #[[ATTR1]] {
-; CGSCC-NEXT: [[V:%.*]] = call noundef i32 @test2(%T* nocapture nofree readonly [[G]], i32 noundef 0) #[[ATTR5]]
+; CGSCC-SAME: (ptr nocapture nofree readonly align 4 [[G:%.*]]) #[[ATTR1]] {
+; CGSCC-NEXT: [[V:%.*]] = call noundef i32 @test2(ptr nocapture nofree readonly [[G]], i32 noundef 0) #[[ATTR5]]
; CGSCC-NEXT: ret i32 [[V]]
;
- %v = call i32 @test2(%T* %g, i32 0)
+ %v = call i32 @test2(ptr %g, i32 0)
; In the version below we keep the call and verify the return value
; is kept as well.
-define i32 @bar(%T* %p, i32 %v) {
+define i32 @bar(ptr %p, i32 %v) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; TUNIT-LABEL: define {{[^@]+}}@bar
-; TUNIT-SAME: (%T* nocapture nofree nonnull writeonly dereferenceable(4) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR2:[0-9]+]] {
-; TUNIT-NEXT: [[I32PTR:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 0
-; TUNIT-NEXT: store i32 [[V]], i32* [[I32PTR]], align 4
+; TUNIT-SAME: (ptr nocapture nofree nonnull writeonly dereferenceable(4) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR2:[0-9]+]] {
+; TUNIT-NEXT: store i32 [[V]], ptr [[P]], align 4
; TUNIT-NEXT: ret i32 0
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@bar
-; CGSCC-SAME: (%T* nocapture nofree nonnull writeonly dereferenceable(4) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR3:[0-9]+]] {
-; CGSCC-NEXT: [[I32PTR:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 0
-; CGSCC-NEXT: store i32 [[V]], i32* [[I32PTR]], align 4
+; CGSCC-SAME: (ptr nocapture nofree nonnull writeonly dereferenceable(4) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR3:[0-9]+]] {
+; CGSCC-NEXT: store i32 [[V]], ptr [[P]], align 4
; CGSCC-NEXT: ret i32 0
;
- %i32ptr = getelementptr %T, %T* %p, i64 0, i32 0
- store i32 %v, i32* %i32ptr
+ %i32ptr = getelementptr %T, ptr %p, i64 0, i32 0
+ store i32 %v, ptr %i32ptr
ret i32 0
}
-define internal i32 @test2b(%T* %p, i32 %p2) {
+define internal i32 @test2b(ptr %p, i32 %p2) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test2b
-; TUNIT-SAME: (%T* nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR3:[0-9]+]] {
-; TUNIT-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 3
-; TUNIT-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], %T* [[P]], i64 0, i32 2
-; TUNIT-NEXT: [[A:%.*]] = load i32, i32* [[A_GEP]], align 4
-; TUNIT-NEXT: [[B:%.*]] = load i32, i32* [[B_GEP]], align 4
+; TUNIT-SAME: (ptr nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR3:[0-9]+]] {
+; TUNIT-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], ptr [[P]], i64 0, i32 3
+; TUNIT-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], ptr [[P]], i64 0, i32 2
+; TUNIT-NEXT: [[A:%.*]] = load i32, ptr [[A_GEP]], align 4
+; TUNIT-NEXT: [[B:%.*]] = load i32, ptr [[B_GEP]], align 4
; TUNIT-NEXT: [[V:%.*]] = add i32 [[A]], [[B]]
-; TUNIT-NEXT: [[CA:%.*]] = musttail call noundef i32 @bar(%T* undef, i32 [[V]]) #[[ATTR4]]
+; TUNIT-NEXT: [[CA:%.*]] = musttail call noundef i32 @bar(ptr undef, i32 [[V]]) #[[ATTR4]]
; TUNIT-NEXT: ret i32 [[CA]]
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test2b
-; CGSCC-SAME: (%T* nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR4:[0-9]+]] {
-; CGSCC-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], %T* [[P]], i64 0, i32 3
-; CGSCC-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], %T* [[P]], i64 0, i32 2
-; CGSCC-NEXT: [[A:%.*]] = load i32, i32* [[A_GEP]], align 4
-; CGSCC-NEXT: [[B:%.*]] = load i32, i32* [[B_GEP]], align 4
+; CGSCC-SAME: (ptr nocapture nofree readonly [[P:%.*]], i32 [[P2:%.*]]) #[[ATTR4:[0-9]+]] {
+; CGSCC-NEXT: [[A_GEP:%.*]] = getelementptr [[T:%.*]], ptr [[P]], i64 0, i32 3
+; CGSCC-NEXT: [[B_GEP:%.*]] = getelementptr [[T]], ptr [[P]], i64 0, i32 2
+; CGSCC-NEXT: [[A:%.*]] = load i32, ptr [[A_GEP]], align 4
+; CGSCC-NEXT: [[B:%.*]] = load i32, ptr [[B_GEP]], align 4
; CGSCC-NEXT: [[V:%.*]] = add i32 [[A]], [[B]]
-; CGSCC-NEXT: [[CA:%.*]] = musttail call noundef i32 @bar(%T* undef, i32 [[V]]) #[[ATTR6:[0-9]+]]
+; CGSCC-NEXT: [[CA:%.*]] = musttail call noundef i32 @bar(ptr undef, i32 [[V]]) #[[ATTR6:[0-9]+]]
; CGSCC-NEXT: ret i32 [[CA]]
;
- %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
- %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
- %a = load i32, i32* %a.gep
- %b = load i32, i32* %b.gep
+ %a.gep = getelementptr %T, ptr %p, i64 0, i32 3
+ %b.gep = getelementptr %T, ptr %p, i64 0, i32 2
+ %a = load i32, ptr %a.gep
+ %b = load i32, ptr %b.gep
%v = add i32 %a, %b
- %ca = musttail call i32 @bar(%T* undef, i32 %v)
+ %ca = musttail call i32 @bar(ptr undef, i32 %v)
ret i32 %ca
}
-define i32 @caller2b(%T* %g) {
+define i32 @caller2b(ptr %g) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@caller2b
-; TUNIT-SAME: (%T* nocapture nofree readonly [[G:%.*]]) #[[ATTR3]] {
-; TUNIT-NEXT: [[V:%.*]] = call noundef i32 @test2b(%T* nocapture nofree readonly [[G]], i32 undef) #[[ATTR4]]
+; TUNIT-SAME: (ptr nocapture nofree readonly [[G:%.*]]) #[[ATTR3]] {
+; TUNIT-NEXT: [[V:%.*]] = call noundef i32 @test2b(ptr nocapture nofree readonly [[G]], i32 undef) #[[ATTR4]]
; TUNIT-NEXT: ret i32 [[V]]
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@caller2b
-; CGSCC-SAME: (%T* nocapture nofree readonly align 4 [[G:%.*]]) #[[ATTR4]] {
-; CGSCC-NEXT: [[V:%.*]] = call noundef i32 @test2b(%T* nocapture nofree readonly [[G]], i32 noundef 0) #[[ATTR7:[0-9]+]]
+; CGSCC-SAME: (ptr nocapture nofree readonly align 4 [[G:%.*]]) #[[ATTR4]] {
+; CGSCC-NEXT: [[V:%.*]] = call noundef i32 @test2b(ptr nocapture nofree readonly [[G]], i32 noundef 0) #[[ATTR7:[0-9]+]]
; CGSCC-NEXT: ret i32 [[V]]
;
- %v = call i32 @test2b(%T* %g, i32 0)
+ %v = call i32 @test2b(ptr %g, i32 0)
; CHECK: @[[CND:[a-zA-Z0-9_$"\\.-]+]] = external global i1
; CHECK: @[[G:[a-zA-Z0-9_$"\\.-]+]] = global i8 0, align 32
;.
-define i32* @test1(i32* align 8 %0) #0 {
+define ptr @test1(ptr align 8 %0) #0 {
; CHECK: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CHECK-LABEL: define {{[^@]+}}@test1
-; CHECK-SAME: (i32* nofree readnone returned align 8 "no-capture-maybe-returned" [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: ret i32* [[TMP0]]
+; CHECK-SAME: (ptr nofree readnone returned align 8 "no-capture-maybe-returned" [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: ret ptr [[TMP0]]
;
- ret i32* %0
+ ret ptr %0
}
; TEST 2
-define i32* @test2(i32* %0) #0 {
+define ptr @test2(ptr %0) #0 {
; CHECK: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CHECK-LABEL: define {{[^@]+}}@test2
-; CHECK-SAME: (i32* nofree readnone returned "no-capture-maybe-returned" [[TMP0:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: ret i32* [[TMP0]]
+; CHECK-SAME: (ptr nofree readnone returned "no-capture-maybe-returned" [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: ret ptr [[TMP0]]
;
- ret i32* %0
+ ret ptr %0
}
; TEST 3
-define i32* @test3(i32* align 8 %0, i32* align 4 %1, i1 %2) #0 {
+define ptr @test3(ptr align 8 %0, ptr align 4 %1, i1 %2) #0 {
; CHECK: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CHECK-LABEL: define {{[^@]+}}@test3
-; CHECK-SAME: (i32* nofree readnone align 8 "no-capture-maybe-returned" [[TMP0:%.*]], i32* nofree readnone align 4 "no-capture-maybe-returned" [[TMP1:%.*]], i1 [[TMP2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RET:%.*]] = select i1 [[TMP2]], i32* [[TMP0]], i32* [[TMP1]]
-; CHECK-NEXT: ret i32* [[RET]]
+; CHECK-SAME: (ptr nofree readnone align 8 "no-capture-maybe-returned" [[TMP0:%.*]], ptr nofree readnone align 4 "no-capture-maybe-returned" [[TMP1:%.*]], i1 [[TMP2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[TMP2]], ptr [[TMP0]], ptr [[TMP1]]
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = select i1 %2, i32* %0, i32* %1
- ret i32* %ret
+ %ret = select i1 %2, ptr %0, ptr %1
+ ret ptr %ret
}
; TEST 4
-define i32* @test4(i32* align 32 %0, i32* align 32 %1, i1 %2) #0 {
+define ptr @test4(ptr align 32 %0, ptr align 32 %1, i1 %2) #0 {
; CHECK: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CHECK-LABEL: define {{[^@]+}}@test4
-; CHECK-SAME: (i32* nofree readnone align 32 "no-capture-maybe-returned" [[TMP0:%.*]], i32* nofree readnone align 32 "no-capture-maybe-returned" [[TMP1:%.*]], i1 [[TMP2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RET:%.*]] = select i1 [[TMP2]], i32* [[TMP0]], i32* [[TMP1]]
-; CHECK-NEXT: ret i32* [[RET]]
+; CHECK-SAME: (ptr nofree readnone align 32 "no-capture-maybe-returned" [[TMP0:%.*]], ptr nofree readnone align 32 "no-capture-maybe-returned" [[TMP1:%.*]], i1 [[TMP2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[TMP2]], ptr [[TMP0]], ptr [[TMP1]]
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = select i1 %2, i32* %0, i32* %1
- ret i32* %ret
+ %ret = select i1 %2, ptr %0, ptr %1
+ ret ptr %ret
}
; TEST 5
-declare i32* @unknown()
-declare align 8 i32* @align8()
+declare ptr @unknown()
+declare align 8 ptr @align8()
-define i32* @test5_1() {
+define ptr @test5_1() {
; CHECK-LABEL: define {{[^@]+}}@test5_1() {
-; CHECK-NEXT: [[RET:%.*]] = tail call align 8 i32* @unknown()
-; CHECK-NEXT: ret i32* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = tail call align 8 ptr @unknown()
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = tail call align 8 i32* @unknown()
- ret i32* %ret
+ %ret = tail call align 8 ptr @unknown()
+ ret ptr %ret
}
-define i32* @test5_2() {
+define ptr @test5_2() {
; CHECK-LABEL: define {{[^@]+}}@test5_2() {
-; CHECK-NEXT: [[RET:%.*]] = tail call align 8 i32* @align8()
-; CHECK-NEXT: ret i32* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = tail call align 8 ptr @align8()
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = tail call i32* @align8()
- ret i32* %ret
+ %ret = tail call ptr @align8()
+ ret ptr %ret
}
; TEST 6
; SCC
-define i32* @test6_1() #0 {
+define ptr @test6_1() #0 {
; TUNIT: Function Attrs: nofree noinline nosync nounwind willreturn memory(none) uwtable
; TUNIT-LABEL: define {{[^@]+}}@test6_1
; TUNIT-SAME: () #[[ATTR1:[0-9]+]] {
-; TUNIT-NEXT: ret i32* undef
+; TUNIT-NEXT: ret ptr undef
;
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test6_1
; CGSCC-SAME: () #[[ATTR0]] {
-; CGSCC-NEXT: ret i32* undef
+; CGSCC-NEXT: ret ptr undef
;
- %ret = tail call i32* @test6_2()
- ret i32* %ret
+ %ret = tail call ptr @test6_2()
+ ret ptr %ret
}
-define i32* @test6_2() #0 {
+define ptr @test6_2() #0 {
; TUNIT: Function Attrs: nofree noinline nosync nounwind willreturn memory(none) uwtable
; TUNIT-LABEL: define {{[^@]+}}@test6_2
; TUNIT-SAME: () #[[ATTR1]] {
-; TUNIT-NEXT: ret i32* undef
+; TUNIT-NEXT: ret ptr undef
;
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test6_2
; CGSCC-SAME: () #[[ATTR0]] {
-; CGSCC-NEXT: ret i32* undef
+; CGSCC-NEXT: ret ptr undef
;
- %ret = tail call i32* @test6_1()
- ret i32* %ret
+ %ret = tail call ptr @test6_1()
+ ret ptr %ret
}
@a2 = common global i8 0, align 16
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f1(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f1(ptr readnone %0) local_unnamed_addr #0 {
; CHECK: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CHECK-LABEL: define {{[^@]+}}@f1
-; CHECK-SAME: (i8* noalias nofree nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-SAME: (ptr noalias nofree nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: br label [[TMP3:%.*]]
; CHECK: 2:
; CHECK-NEXT: unreachable
; CHECK: 3:
-; CHECK-NEXT: ret i8* [[TMP0]]
+; CHECK-NEXT: ret ptr [[TMP0]]
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %3, label %5
; <label>:3: ; preds = %1
- %4 = tail call i8* @f2(i8* nonnull @a1)
- %l = load i8, i8* %4
+ %4 = tail call ptr @f2(ptr nonnull @a1)
+ %l = load i8, ptr %4
br label %5
; <label>:5: ; preds = %1, %3
- %6 = phi i8* [ %4, %3 ], [ %0, %1 ]
- ret i8* %6
+ %6 = phi ptr [ %4, %3 ], [ %0, %1 ]
+ ret ptr %6
}
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f2(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f2(ptr readnone %0) local_unnamed_addr #0 {
; CGSCC: Function Attrs: noinline nounwind uwtable
; CGSCC-LABEL: define {{[^@]+}}@f2
-; CGSCC-SAME: (i8* readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
-; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq i8* undef, null
+; CGSCC-SAME: (ptr readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq ptr undef, null
; CGSCC-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
; CGSCC: 3:
-; CGSCC-NEXT: [[TMP4:%.*]] = tail call i8* @f1(i8* noalias nonnull readnone align 4294967296 dereferenceable(4294967295) undef)
+; CGSCC-NEXT: [[TMP4:%.*]] = tail call ptr @f1(ptr noalias nonnull readnone align 4294967296 dereferenceable(4294967295) undef)
; CGSCC-NEXT: br label [[TMP7:%.*]]
; CGSCC: 5:
-; CGSCC-NEXT: [[TMP6:%.*]] = tail call i8* @f3()
+; CGSCC-NEXT: [[TMP6:%.*]] = tail call ptr @f3()
; CGSCC-NEXT: br label [[TMP7]]
; CGSCC: 7:
-; CGSCC-NEXT: [[TMP8:%.*]] = phi i8* [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
-; CGSCC-NEXT: ret i8* [[TMP8]]
+; CGSCC-NEXT: [[TMP8:%.*]] = phi ptr [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
+; CGSCC-NEXT: ret ptr [[TMP8]]
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %5, label %3
; <label>:3: ; preds = %1
- %4 = tail call i8* @f1(i8* nonnull %0)
+ %4 = tail call ptr @f1(ptr nonnull %0)
br label %7
; <label>:5: ; preds = %1
- %6 = tail call i8* @f3(i8* nonnull @a2)
+ %6 = tail call ptr @f3(ptr nonnull @a2)
br label %7
; <label>:7: ; preds = %5, %3
- %8 = phi i8* [ %4, %3 ], [ %6, %5 ]
- ret i8* %8
+ %8 = phi ptr [ %4, %3 ], [ %6, %5 ]
+ ret ptr %8
}
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f3(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f3(ptr readnone %0) local_unnamed_addr #0 {
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@f3
; CGSCC-SAME: () local_unnamed_addr #[[ATTR0]] {
; CGSCC: 1:
; CGSCC-NEXT: unreachable
; CGSCC: 2:
-; CGSCC-NEXT: ret i8* @a1
+; CGSCC-NEXT: ret ptr @a1
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %3, label %5
; <label>:3: ; preds = %1
- %4 = tail call i8* @f1(i8* nonnull @a2)
+ %4 = tail call ptr @f1(ptr nonnull @a2)
br label %5
; <label>:5: ; preds = %1, %3
- %6 = phi i8* [ %4, %3 ], [ @a1, %1 ]
- ret i8* %6
+ %6 = phi ptr [ %4, %3 ], [ @a1, %1 ]
+ ret ptr %6
}
; TEST 7
; Better than IR information
-define align 4 i8* @test7() #0 {
+define align 4 ptr @test7() #0 {
; TUNIT: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; TUNIT-LABEL: define {{[^@]+}}@test7
; TUNIT-SAME: () #[[ATTR0]] {
-; TUNIT-NEXT: [[C:%.*]] = tail call i8* @f1(i8* noalias nofree noundef nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" @a1) #[[ATTR11:[0-9]+]]
-; TUNIT-NEXT: ret i8* [[C]]
+; TUNIT-NEXT: [[C:%.*]] = tail call ptr @f1(ptr noalias nofree noundef nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" @a1) #[[ATTR11:[0-9]+]]
+; TUNIT-NEXT: ret ptr [[C]]
;
; CGSCC: Function Attrs: nofree noinline nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test7
; CGSCC-SAME: () #[[ATTR2:[0-9]+]] {
-; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) i8* @f1(i8* noalias nofree noundef nonnull readnone align 8 dereferenceable(1) @a1) #[[ATTR13:[0-9]+]]
-; CGSCC-NEXT: ret i8* [[C]]
+; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) ptr @f1(ptr noalias nofree noundef nonnull readnone align 8 dereferenceable(1) @a1) #[[ATTR13:[0-9]+]]
+; CGSCC-NEXT: ret ptr [[C]]
;
- %c = tail call i8* @f1(i8* align 8 dereferenceable(1) @a1)
- ret i8* %c
+ %c = tail call ptr @f1(ptr align 8 dereferenceable(1) @a1)
+ ret ptr %c
}
; TEST 7b
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f1b(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f1b(ptr readnone %0) local_unnamed_addr #0 {
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@f1b
-; CGSCC-SAME: (i8* noalias nofree nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CGSCC-SAME: (ptr noalias nofree nonnull readnone align 8 dereferenceable(1) "no-capture-maybe-returned" [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CGSCC-NEXT: br label [[TMP3:%.*]]
; CGSCC: 2:
; CGSCC-NEXT: unreachable
; CGSCC: 3:
-; CGSCC-NEXT: ret i8* [[TMP0]]
+; CGSCC-NEXT: ret ptr [[TMP0]]
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %3, label %5
; <label>:3: ; preds = %1
- %4 = tail call i8* @f2b(i8* nonnull @a1)
- %l = load i8, i8* %4
- store i8 %l, i8* @a1
+ %4 = tail call ptr @f2b(ptr nonnull @a1)
+ %l = load i8, ptr %4
+ store i8 %l, ptr @a1
br label %5
; <label>:5: ; preds = %1, %3
- %6 = phi i8* [ %4, %3 ], [ %0, %1 ]
- ret i8* %6
+ %6 = phi ptr [ %4, %3 ], [ %0, %1 ]
+ ret ptr %6
}
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f2b(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f2b(ptr readnone %0) local_unnamed_addr #0 {
;
; CGSCC: Function Attrs: noinline nounwind uwtable
; CGSCC-LABEL: define {{[^@]+}}@f2b
-; CGSCC-SAME: (i8* readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR1]] {
-; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq i8* undef, null
+; CGSCC-SAME: (ptr readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq ptr undef, null
; CGSCC-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
; CGSCC: 3:
-; CGSCC-NEXT: [[TMP4:%.*]] = tail call i8* @f1b(i8* noalias nonnull readnone align 4294967296 dereferenceable(4294967295) undef)
+; CGSCC-NEXT: [[TMP4:%.*]] = tail call ptr @f1b(ptr noalias nonnull readnone align 4294967296 dereferenceable(4294967295) undef)
; CGSCC-NEXT: br label [[TMP7:%.*]]
; CGSCC: 5:
-; CGSCC-NEXT: [[TMP6:%.*]] = tail call i8* @f3b()
+; CGSCC-NEXT: [[TMP6:%.*]] = tail call ptr @f3b()
; CGSCC-NEXT: br label [[TMP7]]
; CGSCC: 7:
-; CGSCC-NEXT: [[TMP8:%.*]] = phi i8* [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
-; CGSCC-NEXT: ret i8* [[TMP8]]
+; CGSCC-NEXT: [[TMP8:%.*]] = phi ptr [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
+; CGSCC-NEXT: ret ptr [[TMP8]]
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %5, label %3
; <label>:3: ; preds = %1
- %4 = tail call i8* @f1b(i8* nonnull %0)
+ %4 = tail call ptr @f1b(ptr nonnull %0)
br label %7
; <label>:5: ; preds = %1
- %6 = tail call i8* @f3b(i8* nonnull @a2)
+ %6 = tail call ptr @f3b(ptr nonnull @a2)
br label %7
; <label>:7: ; preds = %5, %3
- %8 = phi i8* [ %4, %3 ], [ %6, %5 ]
- ret i8* %8
+ %8 = phi ptr [ %4, %3 ], [ %6, %5 ]
+ ret ptr %8
}
; Function Attrs: nounwind readnone ssp uwtable
-define internal i8* @f3b(i8* readnone %0) local_unnamed_addr #0 {
+define internal ptr @f3b(ptr readnone %0) local_unnamed_addr #0 {
;
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@f3b
; CGSCC: 1:
; CGSCC-NEXT: unreachable
; CGSCC: 2:
-; CGSCC-NEXT: ret i8* @a1
+; CGSCC-NEXT: ret ptr @a1
;
- %2 = icmp eq i8* %0, null
+ %2 = icmp eq ptr %0, null
br i1 %2, label %3, label %5
; <label>:3: ; preds = %1
- %4 = tail call i8* @f1b(i8* nonnull @a2)
+ %4 = tail call ptr @f1b(ptr nonnull @a2)
br label %5
; <label>:5: ; preds = %1, %3
- %6 = phi i8* [ %4, %3 ], [ @a1, %1 ]
- ret i8* %6
+ %6 = phi ptr [ %4, %3 ], [ @a1, %1 ]
+ ret ptr %6
}
-define align 4 i32* @test7b(i32* align 32 %p) #0 {
+define align 4 ptr @test7b(ptr align 32 %p) #0 {
; TUNIT: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
; TUNIT-LABEL: define {{[^@]+}}@test7b
-; TUNIT-SAME: (i32* nofree readnone returned align 32 "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR0]] {
-; TUNIT-NEXT: ret i32* [[P]]
+; TUNIT-SAME: (ptr nofree readnone returned align 32 "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR0]] {
+; TUNIT-NEXT: ret ptr [[P]]
;
; CGSCC: Function Attrs: nofree noinline nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test7b
-; CGSCC-SAME: (i32* nofree readnone returned align 32 "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR2]] {
-; CGSCC-NEXT: ret i32* [[P]]
+; CGSCC-SAME: (ptr nofree readnone returned align 32 "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR2]] {
+; CGSCC-NEXT: ret ptr [[P]]
;
- tail call i8* @f1b(i8* align 8 dereferenceable(1) @a1)
- ret i32* %p
+ tail call ptr @f1b(ptr align 8 dereferenceable(1) @a1)
+ ret ptr %p
}
; TEST 8
define void @test8_helper() {
; TUNIT-LABEL: define {{[^@]+}}@test8_helper() {
-; TUNIT-NEXT: [[PTR0:%.*]] = tail call i32* @unknown()
-; TUNIT-NEXT: [[PTR1:%.*]] = tail call align 4 i32* @unknown()
-; TUNIT-NEXT: [[PTR2:%.*]] = tail call align 8 i32* @unknown()
-; TUNIT-NEXT: tail call void @test8(i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone [[PTR0]]) #[[ATTR2:[0-9]+]]
-; TUNIT-NEXT: tail call void @test8(i32* noalias nocapture readnone align 8 [[PTR2]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR2]]
-; TUNIT-NEXT: tail call void @test8(i32* noalias nocapture readnone align 8 [[PTR2]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR2]]
+; TUNIT-NEXT: [[PTR0:%.*]] = tail call ptr @unknown()
+; TUNIT-NEXT: [[PTR1:%.*]] = tail call align 4 ptr @unknown()
+; TUNIT-NEXT: [[PTR2:%.*]] = tail call align 8 ptr @unknown()
+; TUNIT-NEXT: tail call void @test8(ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone [[PTR0]]) #[[ATTR2:[0-9]+]]
+; TUNIT-NEXT: tail call void @test8(ptr noalias nocapture readnone align 8 [[PTR2]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR2]]
+; TUNIT-NEXT: tail call void @test8(ptr noalias nocapture readnone align 8 [[PTR2]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR2]]
; TUNIT-NEXT: ret void
;
; CGSCC-LABEL: define {{[^@]+}}@test8_helper() {
-; CGSCC-NEXT: [[PTR0:%.*]] = tail call i32* @unknown()
-; CGSCC-NEXT: [[PTR1:%.*]] = tail call align 4 i32* @unknown()
-; CGSCC-NEXT: [[PTR2:%.*]] = tail call align 8 i32* @unknown()
-; CGSCC-NEXT: tail call void @test8(i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone [[PTR0]]) #[[ATTR3:[0-9]+]]
-; CGSCC-NEXT: tail call void @test8(i32* noalias nocapture readnone align 8 [[PTR2]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR3]]
-; CGSCC-NEXT: tail call void @test8(i32* noalias nocapture readnone align 8 [[PTR2]], i32* noalias nocapture readnone align 4 [[PTR1]], i32* noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR3]]
+; CGSCC-NEXT: [[PTR0:%.*]] = tail call ptr @unknown()
+; CGSCC-NEXT: [[PTR1:%.*]] = tail call align 4 ptr @unknown()
+; CGSCC-NEXT: [[PTR2:%.*]] = tail call align 8 ptr @unknown()
+; CGSCC-NEXT: tail call void @test8(ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone [[PTR0]]) #[[ATTR3:[0-9]+]]
+; CGSCC-NEXT: tail call void @test8(ptr noalias nocapture readnone align 8 [[PTR2]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR3]]
+; CGSCC-NEXT: tail call void @test8(ptr noalias nocapture readnone align 8 [[PTR2]], ptr noalias nocapture readnone align 4 [[PTR1]], ptr noalias nocapture readnone align 4 [[PTR1]]) #[[ATTR3]]
; CGSCC-NEXT: ret void
;
- %ptr0 = tail call i32* @unknown()
- %ptr1 = tail call align 4 i32* @unknown()
- %ptr2 = tail call align 8 i32* @unknown()
+ %ptr0 = tail call ptr @unknown()
+ %ptr1 = tail call align 4 ptr @unknown()
+ %ptr2 = tail call align 8 ptr @unknown()
- tail call void @test8(i32* %ptr1, i32* %ptr1, i32* %ptr0)
- tail call void @test8(i32* %ptr2, i32* %ptr1, i32* %ptr1)
- tail call void @test8(i32* %ptr2, i32* %ptr1, i32* %ptr1)
+ tail call void @test8(ptr %ptr1, ptr %ptr1, ptr %ptr0)
+ tail call void @test8(ptr %ptr2, ptr %ptr1, ptr %ptr1)
+ tail call void @test8(ptr %ptr2, ptr %ptr1, ptr %ptr1)
ret void
}
-declare void @user_i32_ptr(i32* nocapture readnone) nounwind
-define internal void @test8(i32* %a, i32* %b, i32* %c) {
+declare void @user_i32_ptr(ptr nocapture readnone) nounwind
+define internal void @test8(ptr %a, ptr %b, ptr %c) {
; TUNIT: Function Attrs: nounwind
; TUNIT-LABEL: define {{[^@]+}}@test8
-; TUNIT-SAME: (i32* noalias nocapture readnone align 4 [[A:%.*]], i32* noalias nocapture readnone align 4 [[B:%.*]], i32* noalias nocapture readnone [[C:%.*]]) #[[ATTR2]] {
-; TUNIT-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone align 4 [[A]]) #[[ATTR2]]
-; TUNIT-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone align 4 [[B]]) #[[ATTR2]]
-; TUNIT-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone [[C]]) #[[ATTR2]]
+; TUNIT-SAME: (ptr noalias nocapture readnone align 4 [[A:%.*]], ptr noalias nocapture readnone align 4 [[B:%.*]], ptr noalias nocapture readnone [[C:%.*]]) #[[ATTR2]] {
+; TUNIT-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone align 4 [[A]]) #[[ATTR2]]
+; TUNIT-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone align 4 [[B]]) #[[ATTR2]]
+; TUNIT-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone [[C]]) #[[ATTR2]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nounwind
; CGSCC-LABEL: define {{[^@]+}}@test8
-; CGSCC-SAME: (i32* noalias nocapture readnone align 4 [[A:%.*]], i32* noalias nocapture readnone align 4 [[B:%.*]], i32* noalias nocapture readnone [[C:%.*]]) #[[ATTR3]] {
-; CGSCC-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone align 4 [[A]]) #[[ATTR3]]
-; CGSCC-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone align 4 [[B]]) #[[ATTR3]]
-; CGSCC-NEXT: call void @user_i32_ptr(i32* noalias nocapture readnone [[C]]) #[[ATTR3]]
+; CGSCC-SAME: (ptr noalias nocapture readnone align 4 [[A:%.*]], ptr noalias nocapture readnone align 4 [[B:%.*]], ptr noalias nocapture readnone [[C:%.*]]) #[[ATTR3]] {
+; CGSCC-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone align 4 [[A]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone align 4 [[B]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @user_i32_ptr(ptr noalias nocapture readnone [[C]]) #[[ATTR3]]
; CGSCC-NEXT: ret void
;
- call void @user_i32_ptr(i32* %a)
- call void @user_i32_ptr(i32* %b)
- call void @user_i32_ptr(i32* %c)
+ call void @user_i32_ptr(ptr %a)
+ call void @user_i32_ptr(ptr %b)
+ call void @user_i32_ptr(ptr %c)
ret void
}
-declare void @test9_helper(i32* %A)
-define void @test9_traversal(i1 %cnd, i32* align 4 %B, i32* align 8 %C) {
+declare void @test9_helper(ptr %A)
+define void @test9_traversal(i1 %cnd, ptr align 4 %B, ptr align 8 %C) {
; CHECK-LABEL: define {{[^@]+}}@test9_traversal
-; CHECK-SAME: (i1 [[CND:%.*]], i32* align 4 [[B:%.*]], i32* align 8 [[C:%.*]]) {
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CND]], i32* [[B]], i32* [[C]]
-; CHECK-NEXT: call void @test9_helper(i32* align 4 [[SEL]])
+; CHECK-SAME: (i1 [[CND:%.*]], ptr align 4 [[B:%.*]], ptr align 8 [[C:%.*]]) {
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CND]], ptr [[B]], ptr [[C]]
+; CHECK-NEXT: call void @test9_helper(ptr align 4 [[SEL]])
; CHECK-NEXT: ret void
;
- %sel = select i1 %cnd, i32* %B, i32* %C
- call void @test9_helper(i32* %sel)
+ %sel = select i1 %cnd, ptr %B, ptr %C
+ call void @test9_helper(ptr %sel)
ret void
}
; store i32 1, i32* %r, align 32
; FIXME: This will work with an upcoming patch (D66618 or similar)
; store i32 -1, i32* %g1, align 32
-define i32* @test10a(i32* align 32 %p) {
+define ptr @test10a(ptr align 32 %p) {
; TUNIT: Function Attrs: nofree nosync nounwind
; TUNIT-LABEL: define {{[^@]+}}@test10a
-; TUNIT-SAME: (i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR3:[0-9]+]] {
-; TUNIT-NEXT: [[L:%.*]] = load i32, i32* [[P]], align 32
+; TUNIT-SAME: (ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR3:[0-9]+]] {
+; TUNIT-NEXT: [[L:%.*]] = load i32, ptr [[P]], align 32
; TUNIT-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; TUNIT-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; TUNIT: t:
-; TUNIT-NEXT: [[R:%.*]] = call align 32 i32* @test10a(i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR3]]
-; TUNIT-NEXT: store i32 1, i32* [[R]], align 32
-; TUNIT-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
+; TUNIT-NEXT: [[R:%.*]] = call align 32 ptr @test10a(ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR3]]
+; TUNIT-NEXT: store i32 1, ptr [[R]], align 32
+; TUNIT-NEXT: [[G0:%.*]] = getelementptr i32, ptr [[P]], i32 8
; TUNIT-NEXT: br label [[E:%.*]]
; TUNIT: f:
-; TUNIT-NEXT: [[G1:%.*]] = getelementptr i32, i32* [[P]], i32 8
-; TUNIT-NEXT: store i32 -1, i32* [[G1]], align 32
+; TUNIT-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[P]], i32 8
+; TUNIT-NEXT: store i32 -1, ptr [[G1]], align 32
; TUNIT-NEXT: br label [[E]]
; TUNIT: e:
-; TUNIT-NEXT: [[PHI:%.*]] = phi i32* [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
-; TUNIT-NEXT: ret i32* [[PHI]]
+; TUNIT-NEXT: [[PHI:%.*]] = phi ptr [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
+; TUNIT-NEXT: ret ptr [[PHI]]
;
; CGSCC: Function Attrs: nofree nosync nounwind
; CGSCC-LABEL: define {{[^@]+}}@test10a
-; CGSCC-SAME: (i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR4:[0-9]+]] {
-; CGSCC-NEXT: [[L:%.*]] = load i32, i32* [[P]], align 32
+; CGSCC-SAME: (ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR4:[0-9]+]] {
+; CGSCC-NEXT: [[L:%.*]] = load i32, ptr [[P]], align 32
; CGSCC-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; CGSCC-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; CGSCC: t:
-; CGSCC-NEXT: [[R:%.*]] = call align 32 i32* @test10a(i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR4]]
-; CGSCC-NEXT: store i32 1, i32* [[R]], align 32
-; CGSCC-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
+; CGSCC-NEXT: [[R:%.*]] = call align 32 ptr @test10a(ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR4]]
+; CGSCC-NEXT: store i32 1, ptr [[R]], align 32
+; CGSCC-NEXT: [[G0:%.*]] = getelementptr i32, ptr [[P]], i32 8
; CGSCC-NEXT: br label [[E:%.*]]
; CGSCC: f:
-; CGSCC-NEXT: [[G1:%.*]] = getelementptr i32, i32* [[P]], i32 8
-; CGSCC-NEXT: store i32 -1, i32* [[G1]], align 32
+; CGSCC-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[P]], i32 8
+; CGSCC-NEXT: store i32 -1, ptr [[G1]], align 32
; CGSCC-NEXT: br label [[E]]
; CGSCC: e:
-; CGSCC-NEXT: [[PHI:%.*]] = phi i32* [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
-; CGSCC-NEXT: ret i32* [[PHI]]
+; CGSCC-NEXT: [[PHI:%.*]] = phi ptr [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
+; CGSCC-NEXT: ret ptr [[PHI]]
;
- %l = load i32, i32* %p
+ %l = load i32, ptr %p
%c = icmp eq i32 %l, 0
br i1 %c, label %t, label %f
t:
- %r = call i32* @test10a(i32* %p)
- store i32 1, i32* %r
- %g0 = getelementptr i32, i32* %p, i32 8
+ %r = call ptr @test10a(ptr %p)
+ store i32 1, ptr %r
+ %g0 = getelementptr i32, ptr %p, i32 8
br label %e
f:
- %g1 = getelementptr i32, i32* %p, i32 8
- store i32 -1, i32* %g1
+ %g1 = getelementptr i32, ptr %p, i32 8
+ store i32 -1, ptr %g1
br label %e
e:
- %phi = phi i32* [%g0, %t], [%g1, %f]
- ret i32* %phi
+ %phi = phi ptr [%g0, %t], [%g1, %f]
+ ret ptr %phi
}
; FIXME: This will work with an upcoming patch (D66618 or similar)
; store i32 1, i32* %r, align 32
; FIXME: This will work with an upcoming patch (D66618 or similar)
; store i32 -1, i32* %g1, align 32
-define i32* @test10b(i32* align 32 %p) {
+define ptr @test10b(ptr align 32 %p) {
; TUNIT: Function Attrs: nofree nosync nounwind
; TUNIT-LABEL: define {{[^@]+}}@test10b
-; TUNIT-SAME: (i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR3]] {
-; TUNIT-NEXT: [[L:%.*]] = load i32, i32* [[P]], align 32
+; TUNIT-SAME: (ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR3]] {
+; TUNIT-NEXT: [[L:%.*]] = load i32, ptr [[P]], align 32
; TUNIT-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; TUNIT-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; TUNIT: t:
-; TUNIT-NEXT: [[R:%.*]] = call align 32 i32* @test10b(i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR3]]
-; TUNIT-NEXT: store i32 1, i32* [[R]], align 32
-; TUNIT-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
+; TUNIT-NEXT: [[R:%.*]] = call align 32 ptr @test10b(ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR3]]
+; TUNIT-NEXT: store i32 1, ptr [[R]], align 32
+; TUNIT-NEXT: [[G0:%.*]] = getelementptr i32, ptr [[P]], i32 8
; TUNIT-NEXT: br label [[E:%.*]]
; TUNIT: f:
-; TUNIT-NEXT: [[G1:%.*]] = getelementptr i32, i32* [[P]], i32 -8
-; TUNIT-NEXT: store i32 -1, i32* [[G1]], align 32
+; TUNIT-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[P]], i32 -8
+; TUNIT-NEXT: store i32 -1, ptr [[G1]], align 32
; TUNIT-NEXT: br label [[E]]
; TUNIT: e:
-; TUNIT-NEXT: [[PHI:%.*]] = phi i32* [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
-; TUNIT-NEXT: ret i32* [[PHI]]
+; TUNIT-NEXT: [[PHI:%.*]] = phi ptr [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
+; TUNIT-NEXT: ret ptr [[PHI]]
;
; CGSCC: Function Attrs: nofree nosync nounwind
; CGSCC-LABEL: define {{[^@]+}}@test10b
-; CGSCC-SAME: (i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR4]] {
-; CGSCC-NEXT: [[L:%.*]] = load i32, i32* [[P]], align 32
+; CGSCC-SAME: (ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR4]] {
+; CGSCC-NEXT: [[L:%.*]] = load i32, ptr [[P]], align 32
; CGSCC-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; CGSCC-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]]
; CGSCC: t:
-; CGSCC-NEXT: [[R:%.*]] = call align 32 i32* @test10b(i32* nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR4]]
-; CGSCC-NEXT: store i32 1, i32* [[R]], align 32
-; CGSCC-NEXT: [[G0:%.*]] = getelementptr i32, i32* [[P]], i32 8
+; CGSCC-NEXT: [[R:%.*]] = call align 32 ptr @test10b(ptr nofree noundef nonnull align 32 dereferenceable(4) "no-capture-maybe-returned" [[P]]) #[[ATTR4]]
+; CGSCC-NEXT: store i32 1, ptr [[R]], align 32
+; CGSCC-NEXT: [[G0:%.*]] = getelementptr i32, ptr [[P]], i32 8
; CGSCC-NEXT: br label [[E:%.*]]
; CGSCC: f:
-; CGSCC-NEXT: [[G1:%.*]] = getelementptr i32, i32* [[P]], i32 -8
-; CGSCC-NEXT: store i32 -1, i32* [[G1]], align 32
+; CGSCC-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[P]], i32 -8
+; CGSCC-NEXT: store i32 -1, ptr [[G1]], align 32
; CGSCC-NEXT: br label [[E]]
; CGSCC: e:
-; CGSCC-NEXT: [[PHI:%.*]] = phi i32* [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
-; CGSCC-NEXT: ret i32* [[PHI]]
+; CGSCC-NEXT: [[PHI:%.*]] = phi ptr [ [[G0]], [[T]] ], [ [[G1]], [[F]] ]
+; CGSCC-NEXT: ret ptr [[PHI]]
;
- %l = load i32, i32* %p
+ %l = load i32, ptr %p
%c = icmp eq i32 %l, 0
br i1 %c, label %t, label %f
t:
- %r = call i32* @test10b(i32* %p)
- store i32 1, i32* %r
- %g0 = getelementptr i32, i32* %p, i32 8
+ %r = call ptr @test10b(ptr %p)
+ store i32 1, ptr %r
+ %g0 = getelementptr i32, ptr %p, i32 8
br label %e
f:
- %g1 = getelementptr i32, i32* %p, i32 -8
- store i32 -1, i32* %g1
+ %g1 = getelementptr i32, ptr %p, i32 -8
+ store i32 -1, ptr %g1
br label %e
e:
- %phi = phi i32* [%g0, %t], [%g1, %f]
- ret i32* %phi
+ %phi = phi ptr [%g0, %t], [%g1, %f]
+ ret ptr %phi
}
-define i64 @test11(i32* %p) {
+define i64 @test11(ptr %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; TUNIT-LABEL: define {{[^@]+}}@test11
-; TUNIT-SAME: (i32* nocapture nofree nonnull readonly align 8 dereferenceable(8) [[P:%.*]]) #[[ATTR4:[0-9]+]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[RET:%.*]] = load i64, i64* [[P_CAST]], align 8
+; TUNIT-SAME: (ptr nocapture nofree nonnull readonly align 8 dereferenceable(8) [[P:%.*]]) #[[ATTR4:[0-9]+]] {
+; TUNIT-NEXT: [[RET:%.*]] = load i64, ptr [[P]], align 8
; TUNIT-NEXT: ret i64 [[RET]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@test11
-; CGSCC-SAME: (i32* nocapture nofree nonnull readonly align 8 dereferenceable(8) [[P:%.*]]) #[[ATTR5:[0-9]+]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[RET:%.*]] = load i64, i64* [[P_CAST]], align 8
+; CGSCC-SAME: (ptr nocapture nofree nonnull readonly align 8 dereferenceable(8) [[P:%.*]]) #[[ATTR5:[0-9]+]] {
+; CGSCC-NEXT: [[RET:%.*]] = load i64, ptr [[P]], align 8
; CGSCC-NEXT: ret i64 [[RET]]
;
- %p-cast = bitcast i32* %p to i64*
- %ret = load i64, i64* %p-cast, align 8
+ %p-cast = bitcast ptr %p to ptr
+ %ret = load i64, ptr %p-cast, align 8
ret i64 %ret
}
; Test for deduction using must-be-executed-context and GEP instruction
; FXIME: %p should have nonnull
-define i64 @test12-1(i32* align 4 %p) {
+define i64 @test12-1(ptr align 4 %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; TUNIT-LABEL: define {{[^@]+}}@test12-1
-; TUNIT-SAME: (i32* nocapture nofree readonly align 16 [[P:%.*]]) #[[ATTR4]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; TUNIT-NEXT: [[RET:%.*]] = load i64, i64* [[ARRAYIDX1]], align 16
+; TUNIT-SAME: (ptr nocapture nofree readonly align 16 [[P:%.*]]) #[[ATTR4]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[RET:%.*]] = load i64, ptr [[ARRAYIDX1]], align 16
; TUNIT-NEXT: ret i64 [[RET]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@test12-1
-; CGSCC-SAME: (i32* nocapture nofree readonly align 16 [[P:%.*]]) #[[ATTR5]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; CGSCC-NEXT: [[RET:%.*]] = load i64, i64* [[ARRAYIDX1]], align 16
+; CGSCC-SAME: (ptr nocapture nofree readonly align 16 [[P:%.*]]) #[[ATTR5]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[RET:%.*]] = load i64, ptr [[ARRAYIDX1]], align 16
; CGSCC-NEXT: ret i64 [[RET]]
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 1
- %arrayidx1 = getelementptr i64, i64* %arrayidx0, i64 3
- %ret = load i64, i64* %arrayidx1, align 16
+ %p-cast = bitcast ptr %p to ptr
+ %arrayidx0 = getelementptr i64, ptr %p-cast, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %ret = load i64, ptr %arrayidx1, align 16
ret i64 %ret
}
-define i64 @test12-2(i32* align 4 %p) {
+define i64 @test12-2(ptr align 4 %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; TUNIT-LABEL: define {{[^@]+}}@test12-2
-; TUNIT-SAME: (i32* nocapture nofree nonnull readonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR4]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; TUNIT-NEXT: [[RET:%.*]] = load i64, i64* [[ARRAYIDX0]], align 16
+; TUNIT-SAME: (ptr nocapture nofree nonnull readonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR4]] {
+; TUNIT-NEXT: [[RET:%.*]] = load i64, ptr [[P]], align 16
; TUNIT-NEXT: ret i64 [[RET]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@test12-2
-; CGSCC-SAME: (i32* nocapture nofree nonnull readonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR5]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; CGSCC-NEXT: [[RET:%.*]] = load i64, i64* [[ARRAYIDX0]], align 16
+; CGSCC-SAME: (ptr nocapture nofree nonnull readonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR5]] {
+; CGSCC-NEXT: [[RET:%.*]] = load i64, ptr [[P]], align 16
; CGSCC-NEXT: ret i64 [[RET]]
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 0
- %ret = load i64, i64* %arrayidx0, align 16
+ %p-cast = bitcast ptr %p to ptr
+ %ret = load i64, ptr %p-cast, align 16
ret i64 %ret
}
; FXIME: %p should have nonnull
-define void @test12-3(i32* align 4 %p) {
+define void @test12-3(ptr align 4 %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; TUNIT-LABEL: define {{[^@]+}}@test12-3
-; TUNIT-SAME: (i32* nocapture nofree writeonly align 16 [[P:%.*]]) #[[ATTR5:[0-9]+]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; TUNIT-NEXT: store i64 0, i64* [[ARRAYIDX1]], align 16
+; TUNIT-SAME: (ptr nocapture nofree writeonly align 16 [[P:%.*]]) #[[ATTR5:[0-9]+]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: store i64 0, ptr [[ARRAYIDX1]], align 16
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@test12-3
-; CGSCC-SAME: (i32* nocapture nofree writeonly align 16 [[P:%.*]]) #[[ATTR6:[0-9]+]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; CGSCC-NEXT: store i64 0, i64* [[ARRAYIDX1]], align 16
+; CGSCC-SAME: (ptr nocapture nofree writeonly align 16 [[P:%.*]]) #[[ATTR6:[0-9]+]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: store i64 0, ptr [[ARRAYIDX1]], align 16
; CGSCC-NEXT: ret void
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 1
- %arrayidx1 = getelementptr i64, i64* %arrayidx0, i64 3
- store i64 0, i64* %arrayidx1, align 16
+ %p-cast = bitcast ptr %p to ptr
+ %arrayidx0 = getelementptr i64, ptr %p-cast, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ store i64 0, ptr %arrayidx1, align 16
ret void
}
-define void @test12-4(i32* align 4 %p) {
+define void @test12-4(ptr align 4 %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; TUNIT-LABEL: define {{[^@]+}}@test12-4
-; TUNIT-SAME: (i32* nocapture nofree nonnull writeonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR5]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; TUNIT-NEXT: store i64 0, i64* [[ARRAYIDX0]], align 16
+; TUNIT-SAME: (ptr nocapture nofree nonnull writeonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR5]] {
+; TUNIT-NEXT: store i64 0, ptr [[P]], align 16
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@test12-4
-; CGSCC-SAME: (i32* nocapture nofree nonnull writeonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR6]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; CGSCC-NEXT: store i64 0, i64* [[ARRAYIDX0]], align 16
+; CGSCC-SAME: (ptr nocapture nofree nonnull writeonly align 16 dereferenceable(8) [[P:%.*]]) #[[ATTR6]] {
+; CGSCC-NEXT: store i64 0, ptr [[P]], align 16
; CGSCC-NEXT: ret void
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 0
- store i64 0, i64* %arrayidx0, align 16
+ %p-cast = bitcast ptr %p to ptr
+ store i64 0, ptr %p-cast, align 16
ret void
}
-declare void @use(i64*) willreturn nounwind
+declare void @use(ptr) willreturn nounwind
-define void @test12-5(i32* align 4 %p) {
+define void @test12-5(ptr align 4 %p) {
; TUNIT: Function Attrs: nounwind willreturn
; TUNIT-LABEL: define {{[^@]+}}@test12-5
-; TUNIT-SAME: (i32* align 16 [[P:%.*]]) #[[ATTR6:[0-9]+]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; TUNIT-NEXT: tail call void @use(i64* align 16 [[ARRAYIDX1]]) #[[ATTR6]]
+; TUNIT-SAME: (ptr align 16 [[P:%.*]]) #[[ATTR6:[0-9]+]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: tail call void @use(ptr align 16 [[ARRAYIDX1]]) #[[ATTR6]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nounwind willreturn
; CGSCC-LABEL: define {{[^@]+}}@test12-5
-; CGSCC-SAME: (i32* align 16 [[P:%.*]]) #[[ATTR7:[0-9]+]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 1
-; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, i64* [[ARRAYIDX0]], i64 3
-; CGSCC-NEXT: tail call void @use(i64* align 16 [[ARRAYIDX1]]) #[[ATTR7]]
+; CGSCC-SAME: (ptr align 16 [[P:%.*]]) #[[ATTR7:[0-9]+]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: tail call void @use(ptr align 16 [[ARRAYIDX1]]) #[[ATTR7]]
; CGSCC-NEXT: ret void
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 1
- %arrayidx1 = getelementptr i64, i64* %arrayidx0, i64 3
- tail call void @use(i64* align 16 %arrayidx1)
+ %p-cast = bitcast ptr %p to ptr
+ %arrayidx0 = getelementptr i64, ptr %p-cast, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ tail call void @use(ptr align 16 %arrayidx1)
ret void
}
-define void @test12-6(i32* align 4 %p) {
+define void @test12-6(ptr align 4 %p) {
; TUNIT: Function Attrs: nounwind willreturn
; TUNIT-LABEL: define {{[^@]+}}@test12-6
-; TUNIT-SAME: (i32* align 16 [[P:%.*]]) #[[ATTR6]] {
-; TUNIT-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; TUNIT-NEXT: tail call void @use(i64* align 16 [[ARRAYIDX0]]) #[[ATTR6]]
+; TUNIT-SAME: (ptr align 16 [[P:%.*]]) #[[ATTR6]] {
+; TUNIT-NEXT: tail call void @use(ptr align 16 [[P]]) #[[ATTR6]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nounwind willreturn
; CGSCC-LABEL: define {{[^@]+}}@test12-6
-; CGSCC-SAME: (i32* align 16 [[P:%.*]]) #[[ATTR7]] {
-; CGSCC-NEXT: [[P_CAST:%.*]] = bitcast i32* [[P]] to i64*
-; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, i64* [[P_CAST]], i64 0
-; CGSCC-NEXT: tail call void @use(i64* align 16 [[ARRAYIDX0]]) #[[ATTR7]]
+; CGSCC-SAME: (ptr align 16 [[P:%.*]]) #[[ATTR7]] {
+; CGSCC-NEXT: tail call void @use(ptr align 16 [[P]]) #[[ATTR7]]
; CGSCC-NEXT: ret void
;
- %p-cast = bitcast i32* %p to i64*
- %arrayidx0 = getelementptr i64, i64* %p-cast, i64 0
- tail call void @use(i64* align 16 %arrayidx0)
+ %p-cast = bitcast ptr %p to ptr
+ tail call void @use(ptr align 16 %p-cast)
ret void
}
-define void @test13(i1 %c, i32* align 32 %dst) #0 {
+define void @test13(i1 %c, ptr align 32 %dst) #0 {
; TUNIT: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(argmem: write) uwtable
; TUNIT-LABEL: define {{[^@]+}}@test13
-; TUNIT-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR7:[0-9]+]] {
+; TUNIT-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR7:[0-9]+]] {
; TUNIT-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; TUNIT: truebb:
; TUNIT-NEXT: br label [[END:%.*]]
; TUNIT: falsebb:
; TUNIT-NEXT: br label [[END]]
; TUNIT: end:
-; TUNIT-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ null, [[FALSEBB]] ]
-; TUNIT-NEXT: store i32 0, i32* [[PTR]], align 32
+; TUNIT-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ null, [[FALSEBB]] ]
+; TUNIT-NEXT: store i32 0, ptr [[PTR]], align 32
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree noinline norecurse nosync nounwind willreturn memory(argmem: write) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test13
-; CGSCC-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8:[0-9]+]] {
+; CGSCC-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8:[0-9]+]] {
; CGSCC-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; CGSCC: truebb:
; CGSCC-NEXT: br label [[END:%.*]]
; CGSCC: falsebb:
; CGSCC-NEXT: br label [[END]]
; CGSCC: end:
-; CGSCC-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ null, [[FALSEBB]] ]
-; CGSCC-NEXT: store i32 0, i32* [[PTR]], align 32
+; CGSCC-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ null, [[FALSEBB]] ]
+; CGSCC-NEXT: store i32 0, ptr [[PTR]], align 32
; CGSCC-NEXT: ret void
;
br i1 %c, label %truebb, label %falsebb
truebb:
br label %end
falsebb:
br label %end
end:
- %ptr = phi i32* [ %dst, %truebb ], [ null, %falsebb ]
- store i32 0, i32* %ptr
+ %ptr = phi ptr [ %dst, %truebb ], [ null, %falsebb ]
+ store i32 0, ptr %ptr
ret void
}
-define void @test13-1(i1 %c, i32* align 32 %dst) {
+define void @test13-1(i1 %c, ptr align 32 %dst) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; TUNIT-LABEL: define {{[^@]+}}@test13-1
-; TUNIT-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8:[0-9]+]] {
+; TUNIT-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8:[0-9]+]] {
; TUNIT-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; TUNIT: truebb:
; TUNIT-NEXT: br label [[END:%.*]]
; TUNIT: falsebb:
; TUNIT-NEXT: br label [[END]]
; TUNIT: end:
-; TUNIT-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 48 to i32*), [[FALSEBB]] ]
-; TUNIT-NEXT: store i32 0, i32* [[PTR]], align 16
+; TUNIT-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 48 to ptr), [[FALSEBB]] ]
+; TUNIT-NEXT: store i32 0, ptr [[PTR]], align 16
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; CGSCC-LABEL: define {{[^@]+}}@test13-1
-; CGSCC-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9:[0-9]+]] {
+; CGSCC-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9:[0-9]+]] {
; CGSCC-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; CGSCC: truebb:
; CGSCC-NEXT: br label [[END:%.*]]
; CGSCC: falsebb:
; CGSCC-NEXT: br label [[END]]
; CGSCC: end:
-; CGSCC-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 48 to i32*), [[FALSEBB]] ]
-; CGSCC-NEXT: store i32 0, i32* [[PTR]], align 16
+; CGSCC-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 48 to ptr), [[FALSEBB]] ]
+; CGSCC-NEXT: store i32 0, ptr [[PTR]], align 16
; CGSCC-NEXT: ret void
;
br i1 %c, label %truebb, label %falsebb
truebb:
br label %end
falsebb:
br label %end
end:
- %ptr = phi i32* [ %dst, %truebb ], [ inttoptr (i64 48 to i32*), %falsebb ]
- store i32 0, i32* %ptr
+ %ptr = phi ptr [ %dst, %truebb ], [ inttoptr (i64 48 to ptr), %falsebb ]
+ store i32 0, ptr %ptr
ret void
}
-define void @test13-2(i1 %c, i32* align 32 %dst) {
+define void @test13-2(i1 %c, ptr align 32 %dst) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; TUNIT-LABEL: define {{[^@]+}}@test13-2
-; TUNIT-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; TUNIT: truebb:
; TUNIT-NEXT: br label [[END:%.*]]
; TUNIT: falsebb:
; TUNIT-NEXT: br label [[END]]
; TUNIT: end:
-; TUNIT-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 160 to i32*), [[FALSEBB]] ]
-; TUNIT-NEXT: store i32 0, i32* [[PTR]], align 32
+; TUNIT-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 160 to ptr), [[FALSEBB]] ]
+; TUNIT-NEXT: store i32 0, ptr [[PTR]], align 32
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; CGSCC-LABEL: define {{[^@]+}}@test13-2
-; CGSCC-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9]] {
+; CGSCC-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9]] {
; CGSCC-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; CGSCC: truebb:
; CGSCC-NEXT: br label [[END:%.*]]
; CGSCC: falsebb:
; CGSCC-NEXT: br label [[END]]
; CGSCC: end:
-; CGSCC-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 160 to i32*), [[FALSEBB]] ]
-; CGSCC-NEXT: store i32 0, i32* [[PTR]], align 32
+; CGSCC-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 160 to ptr), [[FALSEBB]] ]
+; CGSCC-NEXT: store i32 0, ptr [[PTR]], align 32
; CGSCC-NEXT: ret void
;
br i1 %c, label %truebb, label %falsebb
truebb:
br label %end
falsebb:
br label %end
end:
- %ptr = phi i32* [ %dst, %truebb ], [ inttoptr (i64 160 to i32*), %falsebb ]
- store i32 0, i32* %ptr
+ %ptr = phi ptr [ %dst, %truebb ], [ inttoptr (i64 160 to ptr), %falsebb ]
+ store i32 0, ptr %ptr
ret void
}
-define void @test13-3(i1 %c, i32* align 32 %dst) {
+define void @test13-3(i1 %c, ptr align 32 %dst) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; TUNIT-LABEL: define {{[^@]+}}@test13-3
-; TUNIT-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; TUNIT: truebb:
; TUNIT-NEXT: br label [[END:%.*]]
; TUNIT: falsebb:
; TUNIT-NEXT: br label [[END]]
; TUNIT: end:
-; TUNIT-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 128 to i32*), [[FALSEBB]] ]
-; TUNIT-NEXT: store i32 0, i32* [[PTR]], align 32
+; TUNIT-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 128 to ptr), [[FALSEBB]] ]
+; TUNIT-NEXT: store i32 0, ptr [[PTR]], align 32
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(write)
; CGSCC-LABEL: define {{[^@]+}}@test13-3
-; CGSCC-SAME: (i1 noundef [[C:%.*]], i32* nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9]] {
+; CGSCC-SAME: (i1 noundef [[C:%.*]], ptr nocapture nofree writeonly align 32 [[DST:%.*]]) #[[ATTR9]] {
; CGSCC-NEXT: br i1 [[C]], label [[TRUEBB:%.*]], label [[FALSEBB:%.*]]
; CGSCC: truebb:
; CGSCC-NEXT: br label [[END:%.*]]
; CGSCC: falsebb:
; CGSCC-NEXT: br label [[END]]
; CGSCC: end:
-; CGSCC-NEXT: [[PTR:%.*]] = phi i32* [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 128 to i32*), [[FALSEBB]] ]
-; CGSCC-NEXT: store i32 0, i32* [[PTR]], align 32
+; CGSCC-NEXT: [[PTR:%.*]] = phi ptr [ [[DST]], [[TRUEBB]] ], [ inttoptr (i64 128 to ptr), [[FALSEBB]] ]
+; CGSCC-NEXT: store i32 0, ptr [[PTR]], align 32
; CGSCC-NEXT: ret void
;
br i1 %c, label %truebb, label %falsebb
truebb:
br label %end
falsebb:
br label %end
end:
- %ptr = phi i32* [ %dst, %truebb ], [ inttoptr (i64 128 to i32*), %falsebb ]
- store i32 0, i32* %ptr
+ %ptr = phi ptr [ %dst, %truebb ], [ inttoptr (i64 128 to ptr), %falsebb ]
+ store i32 0, ptr %ptr
ret void
}
; Don't crash on ptr2int/int2ptr uses.
-define i64 @ptr2int(i32* %p) {
+define i64 @ptr2int(ptr %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@ptr2int
-; TUNIT-SAME: (i32* nofree readnone [[P:%.*]]) #[[ATTR9:[0-9]+]] {
-; TUNIT-NEXT: [[P2I:%.*]] = ptrtoint i32* [[P]] to i64
+; TUNIT-SAME: (ptr nofree readnone [[P:%.*]]) #[[ATTR9:[0-9]+]] {
+; TUNIT-NEXT: [[P2I:%.*]] = ptrtoint ptr [[P]] to i64
; TUNIT-NEXT: ret i64 [[P2I]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@ptr2int
-; CGSCC-SAME: (i32* nofree readnone [[P:%.*]]) #[[ATTR10:[0-9]+]] {
-; CGSCC-NEXT: [[P2I:%.*]] = ptrtoint i32* [[P]] to i64
+; CGSCC-SAME: (ptr nofree readnone [[P:%.*]]) #[[ATTR10:[0-9]+]] {
+; CGSCC-NEXT: [[P2I:%.*]] = ptrtoint ptr [[P]] to i64
; CGSCC-NEXT: ret i64 [[P2I]]
;
- %p2i = ptrtoint i32* %p to i64
+ %p2i = ptrtoint ptr %p to i64
ret i64 %p2i
}
-define i64* @int2ptr(i64 %i) {
+define ptr @int2ptr(i64 %i) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@int2ptr
; TUNIT-SAME: (i64 [[I:%.*]]) #[[ATTR9]] {
-; TUNIT-NEXT: [[I2P:%.*]] = inttoptr i64 [[I]] to i64*
-; TUNIT-NEXT: ret i64* [[I2P]]
+; TUNIT-NEXT: [[I2P:%.*]] = inttoptr i64 [[I]] to ptr
+; TUNIT-NEXT: ret ptr [[I2P]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@int2ptr
; CGSCC-SAME: (i64 [[I:%.*]]) #[[ATTR10]] {
-; CGSCC-NEXT: [[I2P:%.*]] = inttoptr i64 [[I]] to i64*
-; CGSCC-NEXT: ret i64* [[I2P]]
+; CGSCC-NEXT: [[I2P:%.*]] = inttoptr i64 [[I]] to ptr
+; CGSCC-NEXT: ret ptr [[I2P]]
;
- %i2p = inttoptr i64 %i to i64*
- ret i64* %i2p
+ %i2p = inttoptr i64 %i to ptr
+ ret ptr %i2p
}
; Use the store alignment only for the pointer operand.
-define void @aligned_store(i8* %Value, i8** %Ptr) {
+define void @aligned_store(ptr %Value, ptr %Ptr) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; TUNIT-LABEL: define {{[^@]+}}@aligned_store
-; TUNIT-SAME: (i8* nofree writeonly [[VALUE:%.*]], i8** nocapture nofree noundef nonnull writeonly align 32 dereferenceable(8) [[PTR:%.*]]) #[[ATTR5]] {
-; TUNIT-NEXT: store i8* [[VALUE]], i8** [[PTR]], align 32
+; TUNIT-SAME: (ptr nofree writeonly [[VALUE:%.*]], ptr nocapture nofree noundef nonnull writeonly align 32 dereferenceable(8) [[PTR:%.*]]) #[[ATTR5]] {
+; TUNIT-NEXT: store ptr [[VALUE]], ptr [[PTR]], align 32
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@aligned_store
-; CGSCC-SAME: (i8* nofree writeonly [[VALUE:%.*]], i8** nocapture nofree noundef nonnull writeonly align 32 dereferenceable(8) [[PTR:%.*]]) #[[ATTR6]] {
-; CGSCC-NEXT: store i8* [[VALUE]], i8** [[PTR]], align 32
+; CGSCC-SAME: (ptr nofree writeonly [[VALUE:%.*]], ptr nocapture nofree noundef nonnull writeonly align 32 dereferenceable(8) [[PTR:%.*]]) #[[ATTR6]] {
+; CGSCC-NEXT: store ptr [[VALUE]], ptr [[PTR]], align 32
; CGSCC-NEXT: ret void
;
- store i8* %Value, i8** %Ptr, align 32
+ store ptr %Value, ptr %Ptr, align 32
ret void
}
-declare i8* @some_func(i8*)
-define void @align_call_op_not_store(i8* align 2048 %arg) {
+declare ptr @some_func(ptr)
+define void @align_call_op_not_store(ptr align 2048 %arg) {
; CHECK-LABEL: define {{[^@]+}}@align_call_op_not_store
-; CHECK-SAME: (i8* align 2048 [[ARG:%.*]]) {
-; CHECK-NEXT: [[UNKNOWN:%.*]] = call i8* @some_func(i8* align 2048 [[ARG]])
-; CHECK-NEXT: store i8 0, i8* [[UNKNOWN]], align 1
+; CHECK-SAME: (ptr align 2048 [[ARG:%.*]]) {
+; CHECK-NEXT: [[UNKNOWN:%.*]] = call ptr @some_func(ptr align 2048 [[ARG]])
+; CHECK-NEXT: store i8 0, ptr [[UNKNOWN]], align 1
; CHECK-NEXT: ret void
;
- %unknown = call i8* @some_func(i8* %arg)
- store i8 0, i8* %unknown
+ %unknown = call ptr @some_func(ptr %arg)
+ store i8 0, ptr %unknown
ret void
}
-define void @align_store_after_bc(i32* align 2048 %arg) {
+define void @align_store_after_bc(ptr align 2048 %arg) {
;
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; TUNIT-LABEL: define {{[^@]+}}@align_store_after_bc
-; TUNIT-SAME: (i32* nocapture nofree nonnull writeonly align 2048 dereferenceable(1) [[ARG:%.*]]) #[[ATTR5]] {
-; TUNIT-NEXT: [[BC:%.*]] = bitcast i32* [[ARG]] to i8*
-; TUNIT-NEXT: store i8 0, i8* [[BC]], align 2048
+; TUNIT-SAME: (ptr nocapture nofree nonnull writeonly align 2048 dereferenceable(1) [[ARG:%.*]]) #[[ATTR5]] {
+; TUNIT-NEXT: store i8 0, ptr [[ARG]], align 2048
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@align_store_after_bc
-; CGSCC-SAME: (i32* nocapture nofree nonnull writeonly align 2048 dereferenceable(1) [[ARG:%.*]]) #[[ATTR6]] {
-; CGSCC-NEXT: [[BC:%.*]] = bitcast i32* [[ARG]] to i8*
-; CGSCC-NEXT: store i8 0, i8* [[BC]], align 2048
+; CGSCC-SAME: (ptr nocapture nofree nonnull writeonly align 2048 dereferenceable(1) [[ARG:%.*]]) #[[ATTR6]] {
+; CGSCC-NEXT: store i8 0, ptr [[ARG]], align 2048
; CGSCC-NEXT: ret void
;
- %bc = bitcast i32* %arg to i8*
- store i8 0, i8* %bc
+ %bc = bitcast ptr %arg to ptr
+ store i8 0, ptr %bc
ret void
}
; Make sure we do not annotate the callee of a must-tail call with an alignment
; we cannot also put on the caller.
@cnd = external global i1
define i32 @musttail_callee_1(ptr %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; TUNIT-LABEL: define {{[^@]+}}@musttail_callee_1
; TUNIT-SAME: (ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P:%.*]]) #[[ATTR4]] {
; TUNIT-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 32
; TUNIT-NEXT: ret i32 [[V]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CGSCC-LABEL: define {{[^@]+}}@musttail_callee_1
; CGSCC-SAME: (ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P:%.*]]) #[[ATTR5]] {
; CGSCC-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 32
; CGSCC-NEXT: ret i32 [[V]]
;
  %v = load i32, ptr %p, align 32
  ret i32 %v
}
define i32 @musttail_caller_1(ptr %p) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(read)
; TUNIT-LABEL: define {{[^@]+}}@musttail_caller_1
; TUNIT-SAME: (ptr nocapture nofree readonly [[P:%.*]]) #[[ATTR10:[0-9]+]] {
; TUNIT-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; TUNIT-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; TUNIT: mt:
; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree readonly [[P]]) #[[ATTR12:[0-9]+]]
; TUNIT-NEXT: ret i32 [[V]]
; TUNIT: exit:
; TUNIT-NEXT: ret i32 0
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(read)
; CGSCC-LABEL: define {{[^@]+}}@musttail_caller_1
; CGSCC-SAME: (ptr nocapture nofree readonly [[P:%.*]]) #[[ATTR11:[0-9]+]] {
; CGSCC-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; CGSCC-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; CGSCC: mt:
; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P]]) #[[ATTR13]]
; CGSCC-NEXT: ret i32 [[V]]
; CGSCC: exit:
; CGSCC-NEXT: ret i32 0
  %c = load i1, ptr @cnd
  br i1 %c, label %mt, label %exit
mt:
  %v = musttail call i32 @musttail_callee_1(ptr %p)
  ret i32 %v
exit:
  ret i32 0
}
define ptr @checkAndAdvance(ptr align(16) %p) {
; TUNIT: Function Attrs: nounwind
; TUNIT-LABEL: define {{[^@]+}}@checkAndAdvance
; TUNIT-SAME: (ptr noundef nonnull readonly align 16 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR2]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[TMP0:%.*]] = load i32, ptr [[P]], align 16
; TUNIT-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
; TUNIT-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[RETURN:%.*]]
; TUNIT: if.then:
; TUNIT-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 4
; TUNIT-NEXT: [[CALL:%.*]] = call ptr @checkAndAdvance(ptr nonnull readonly align 16 "no-capture-maybe-returned" [[ADD_PTR]]) #[[ATTR2]]
; TUNIT-NEXT: br label [[RETURN]]
; TUNIT: return:
; TUNIT-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[ADD_PTR]], [[IF_THEN]] ], [ [[P]], [[ENTRY:%.*]] ]
; TUNIT-NEXT: call void @user_i32_ptr(ptr noalias nocapture nonnull readnone align 16 [[RETVAL_0]]) #[[ATTR2]]
; TUNIT-NEXT: ret ptr [[RETVAL_0]]
;
; CGSCC: Function Attrs: nounwind
; CGSCC-LABEL: define {{[^@]+}}@checkAndAdvance
; CGSCC-SAME: (ptr noundef nonnull readonly align 16 dereferenceable(4) "no-capture-maybe-returned" [[P:%.*]]) #[[ATTR3]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[TMP0:%.*]] = load i32, ptr [[P]], align 16
; CGSCC-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
; CGSCC-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[RETURN:%.*]]
; CGSCC: if.then:
; CGSCC-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 4
; CGSCC-NEXT: [[CALL:%.*]] = call ptr @checkAndAdvance(ptr nonnull readonly align 16 "no-capture-maybe-returned" [[ADD_PTR]]) #[[ATTR3]]
; CGSCC-NEXT: br label [[RETURN]]
; CGSCC: return:
; CGSCC-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[ADD_PTR]], [[IF_THEN]] ], [ [[P]], [[ENTRY:%.*]] ]
; CGSCC-NEXT: call void @user_i32_ptr(ptr noalias nocapture nonnull readnone align 16 [[RETVAL_0]]) #[[ATTR3]]
; CGSCC-NEXT: ret ptr [[RETVAL_0]]
;
entry:
  %0 = load i32, ptr %p, align 4
  %cmp = icmp eq i32 %0, 0
  br i1 %cmp, label %if.then, label %return
if.then: ; preds = %entry
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
  %call = call ptr @checkAndAdvance(ptr nonnull %add.ptr)
  br label %return
return: ; preds = %entry, %if.then
  %retval.0 = phi ptr [ %call, %if.then ], [ %p, %entry ]
  call void @user_i32_ptr(ptr %retval.0)
  ret ptr %retval.0
}
; FIXME: align 4 should not be propagated to the caller's p unless there is noundef
define void @align4_caller(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@align4_caller
; CHECK-SAME: (ptr align 4 [[P:%.*]]) {
; CHECK-NEXT: call void @align4_callee(ptr align 4 [[P]])
; CHECK-NEXT: ret void
;
  call void @align4_callee(ptr %p)
  ret void
}
declare void @align4_callee(ptr align(4) %p)
@G = global i8 0, align 32
define internal ptr @aligned_8_return(ptr %a, i1 %c1, i1 %c2) norecurse {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@aligned_8_return
; TUNIT-SAME: (ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A:%.*]], i1 noundef [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR9]] {
; TUNIT-NEXT: [[STACK:%.*]] = alloca ptr, align 8
; TUNIT-NEXT: br i1 [[C1]], label [[T:%.*]], label [[F:%.*]]
; TUNIT: t:
; TUNIT-NEXT: [[GEP:%.*]] = getelementptr i8, ptr @G, i32 8
; TUNIT-NEXT: [[SEL:%.*]] = select i1 [[C2]], ptr [[A]], ptr [[GEP]]
; TUNIT-NEXT: store ptr [[SEL]], ptr [[STACK]], align 8
; TUNIT-NEXT: br label [[END:%.*]]
; TUNIT: f:
; TUNIT-NEXT: store ptr @G, ptr [[STACK]], align 8
; TUNIT-NEXT: br label [[END]]
; TUNIT: end:
; TUNIT-NEXT: [[L:%.*]] = load ptr, ptr [[STACK]], align 8
; TUNIT-NEXT: ret ptr [[L]]
;
; CGSCC: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@aligned_8_return
; CGSCC-SAME: (ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A:%.*]], i1 noundef [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR10]] {
; CGSCC-NEXT: [[STACK:%.*]] = alloca ptr, align 8
; CGSCC-NEXT: br i1 [[C1]], label [[T:%.*]], label [[F:%.*]]
; CGSCC: t:
; CGSCC-NEXT: [[GEP:%.*]] = getelementptr i8, ptr @G, i32 8
; CGSCC-NEXT: [[SEL:%.*]] = select i1 [[C2]], ptr [[A]], ptr [[GEP]]
; CGSCC-NEXT: store ptr [[SEL]], ptr [[STACK]], align 8
; CGSCC-NEXT: br label [[END:%.*]]
; CGSCC: f:
; CGSCC-NEXT: store ptr @G, ptr [[STACK]], align 8
; CGSCC-NEXT: br label [[END]]
; CGSCC: end:
; CGSCC-NEXT: [[L:%.*]] = load ptr, ptr [[STACK]], align 8
; CGSCC-NEXT: ret ptr [[L]]
;
  %stack = alloca ptr
  br i1 %c1, label %t, label %f
t:
  %gep = getelementptr i8, ptr @G, i32 8
  %sel = select i1 %c2, ptr %a, ptr %gep
  store ptr %sel, ptr %stack
  br label %end
f:
  store ptr @G, ptr %stack
  br label %end
end:
  %l = load ptr, ptr %stack
  ret ptr %l
}
define ptr @aligned_8_return_caller(ptr align(16) %a, i1 %c1, i1 %c2) {
; TUNIT: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@aligned_8_return_caller
; TUNIT-SAME: (ptr nofree readnone align 16 "no-capture-maybe-returned" [[A:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR9]] {
; TUNIT-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A]], i1 [[C1]], i1 [[C2]]) #[[ATTR12]]
; TUNIT-NEXT: ret ptr [[R]]
;
; CGSCC: Function Attrs: nofree nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@aligned_8_return_caller
; CGSCC-SAME: (ptr nofree readnone align 16 [[A:%.*]], i1 noundef [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR12:[0-9]+]] {
; CGSCC-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR13]]
; CGSCC-NEXT: ret ptr [[R]]
;
  %r = call ptr @aligned_8_return(ptr %a, i1 %c1, i1 %c2)
  ret ptr %r
}
attributes #0 = { nounwind uwtable noinline }
; Determine dereference-ability before unused loads get deleted:
; https://bugs.llvm.org/show_bug.cgi?id=21780
define <4 x double> @PR21780(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780
; CHECK-SAME: (ptr nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 2
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 3
; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[PTR]], align 8
; CHECK-NEXT: [[T1:%.*]] = load double, ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[T2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: [[VECINIT0:%.*]] = insertelement <4 x double> poison, double [[T0]], i32 0
; CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x double> [[VECINIT0]], double [[T1]], i32 1
; CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <4 x double> [[VECINIT1]], double [[T2]], i32 2
; CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <4 x double> [[VECINIT2]], double [[T3]], i32 3
; CHECK-NEXT: ret <4 x double> [[VECINIT3]]
;
; GEP of index 0 is simplified away.
  %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 1
  %arrayidx2 = getelementptr inbounds double, ptr %ptr, i64 2
  %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 3
  %t0 = load double, ptr %ptr, align 8
  %t1 = load double, ptr %arrayidx1, align 8
  %t2 = load double, ptr %arrayidx2, align 8
  %t3 = load double, ptr %arrayidx3, align 8
  %vecinit0 = insertelement <4 x double> poison, double %t0, i32 0
  %vecinit1 = insertelement <4 x double> %vecinit0, double %t1, i32 1
  %vecinit2 = insertelement <4 x double> %vecinit1, double %t2, i32 2
  %vecinit3 = insertelement <4 x double> %vecinit2, double %t3, i32 3
  ret <4 x double> %vecinit3
}
define double @PR21780_only_access3_with_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_with_inbounds
; CHECK-SAME: (ptr nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 3
; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
  %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 3
  %t3 = load double, ptr %arrayidx3, align 8
  ret double %t3
}
define double @PR21780_only_access3_without_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_without_inbounds
; CHECK-SAME: (ptr nocapture nofree readonly align 8 [[PTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, ptr [[PTR]], i64 3
; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
  %arrayidx3 = getelementptr double, ptr %ptr, i64 3
  %t3 = load double, ptr %arrayidx3, align 8
  ret double %t3
}
define double @PR21780_without_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_without_inbounds
; CHECK-SAME: (ptr nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, ptr [[PTR]], i64 3
; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
  %arrayidx1 = getelementptr double, ptr %ptr, i64 1
  %arrayidx2 = getelementptr double, ptr %ptr, i64 2
  %arrayidx3 = getelementptr double, ptr %ptr, i64 3
  %t0 = load double, ptr %ptr, align 8
  %t1 = load double, ptr %arrayidx1, align 8
  %t2 = load double, ptr %arrayidx2, align 8
  %t3 = load double, ptr %arrayidx3, align 8
  ret double %t3
}
; Unsimplified, but still valid. Also, throw in some bogus arguments.
define void @gep0(ptr %unused, ptr %other, ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define {{[^@]+}}@gep0
; CHECK-SAME: (ptr nocapture nofree readnone [[UNUSED:%.*]], ptr nocapture nofree noundef nonnull writeonly dereferenceable(1) [[OTHER:%.*]], ptr nocapture nofree nonnull readonly dereferenceable(3) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[PTR]], i64 2
; CHECK-NEXT: [[T2:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
; CHECK-NEXT: store i8 [[T2]], ptr [[OTHER]], align 1
; CHECK-NEXT: ret void
;
  %arrayidx0 = getelementptr i8, ptr %ptr, i64 0
  %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
  %t0 = load i8, ptr %arrayidx0
  %t1 = load i8, ptr %arrayidx1
  %t2 = load i8, ptr %arrayidx2
  store i8 %t2, ptr %other
  ret void
}
; Order of accesses does not change computation.
; Multiple arguments may be dereferenceable.
define void @ordering(ptr %ptr1, ptr %ptr2) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@ordering
; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(3) [[PTR1:%.*]], ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: ret void
;
  %a20 = getelementptr i32, ptr %ptr2, i64 0
  %a12 = getelementptr i8, ptr %ptr1, i64 2
  %t12 = load i8, ptr %a12
  %a11 = getelementptr i8, ptr %ptr1, i64 1
  %t20 = load i32, ptr %a20
  %t10 = load i8, ptr %ptr1
  %t11 = load i8, ptr %a11
  %a21 = getelementptr i32, ptr %ptr2, i64 1
  %t21 = load i32, ptr %a21
  ret void
}
; Not in entry block.
define void @not_entry_but_guaranteed_to_execute(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_entry_but_guaranteed_to_execute
; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(3) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: exit:
entry:
  br label %exit
exit:
  %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
  %t0 = load i8, ptr %ptr
  %t1 = load i8, ptr %arrayidx1
  %t2 = load i8, ptr %arrayidx2
  ret void
}
; Not in entry block and not guaranteed to execute.
define void @not_entry_not_guaranteed_to_execute(ptr %ptr, i1 %cond) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_entry_not_guaranteed_to_execute
; CHECK-SAME: (ptr nocapture nofree readnone [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]]
; CHECK: loads:
entry:
  br i1 %cond, label %loads, label %exit
loads:
  %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
  %t0 = load i8, ptr %ptr
  %t1 = load i8, ptr %arrayidx1
  %t2 = load i8, ptr %arrayidx2
  ret void
exit:
  ret void
}
; The last load may not execute, so derefenceable bytes only covers the 1st two loads.
define void @partial_in_entry(ptr %ptr, i1 %cond) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@partial_in_entry
; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(4) [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]]
; CHECK: loads:
; CHECK-NEXT: ret void
;
entry:
  %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
  %t0 = load i16, ptr %ptr
  %t1 = load i16, ptr %arrayidx1
  br i1 %cond, label %loads, label %exit
loads:
  %t2 = load i16, ptr %arrayidx2
  ret void
exit:
  ret void
}
; The volatile load can't be used to prove a non-volatile access is allowed.
; The 2nd and 3rd loads may never execute.
define void @volatile_is_not_dereferenceable(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define {{[^@]+}}@volatile_is_not_dereferenceable
; CHECK-SAME: (ptr nofree align 2 [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: [[T0:%.*]] = load volatile i16, ptr [[PTR]], align 2
; CHECK-NEXT: ret void
;
  %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
  %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
  %t0 = load volatile i16, ptr %arrayidx0
  %t1 = load i16, ptr %arrayidx1
  %t2 = load i16, ptr %arrayidx2
  ret void
}
; TODO: We should allow inference for atomic (but not volatile) ops.
define void @atomic_is_alright(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@atomic_is_alright
; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(6) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
  %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
  %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
  %t0 = load atomic i16, ptr %arrayidx0 unordered, align 2
  %t1 = load i16, ptr %arrayidx1
  %t2 = load i16, ptr %arrayidx2
  ret void
}
declare void @may_not_return()
define void @not_guaranteed_to_transfer_execution(ptr %ptr) {
; CHECK-LABEL: define {{[^@]+}}@not_guaranteed_to_transfer_execution
; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) {
; CHECK-NEXT: call void @may_not_return()
; CHECK-NEXT: ret void
;
  %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
  %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
  %t0 = load i16, ptr %arrayidx0
  call void @may_not_return()
  %t1 = load i16, ptr %arrayidx1
  %t2 = load i16, ptr %arrayidx2
  ret void
}
; We must have consecutive accesses.
define void @variable_gep_index(ptr %unused, ptr %ptr, i64 %variable_index) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@variable_gep_index
; CHECK-SAME: (ptr nocapture nofree readnone [[UNUSED:%.*]], ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]], i64 [[VARIABLE_INDEX:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
  %arrayidx1 = getelementptr i8, ptr %ptr, i64 %variable_index
  %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
  %t0 = load i8, ptr %ptr
  %t1 = load i8, ptr %arrayidx1
  %t2 = load i8, ptr %arrayidx2
  ret void
}
; Deal with >1 GEP index.
define void @multi_index_gep(ptr %ptr) {
; FIXME: %ptr should be dereferenceable(4)
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@multi_index_gep
; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
  %arrayidx00 = getelementptr <4 x i8>, ptr %ptr, i64 0, i64 0
  %t0 = load i8, ptr %arrayidx00
  ret void
}
; Could round weird bitwidths down?
define void @not_byte_multiple(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_byte_multiple
; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
  %arrayidx0 = getelementptr i9, ptr %ptr, i64 0
  %t0 = load i9, ptr %arrayidx0
  ret void
}
; Missing direct access from the pointer.
define void @no_pointer_deref(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@no_pointer_deref
; CHECK-SAME: (ptr nocapture nofree readnone align 2 [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
  %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
  %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
  %t1 = load i16, ptr %arrayidx1
  %t2 = load i16, ptr %arrayidx2
  ret void
}
; Out-of-order is ok, but missing access concludes dereferenceable range.
-define void @non_consecutive(i32* %ptr) {
+define void @non_consecutive(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@non_consecutive
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
- %t1 = load i32, i32* %arrayidx1
- %t0 = load i32, i32* %arrayidx0
- %t3 = load i32, i32* %arrayidx3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
+ %t1 = load i32, ptr %arrayidx1
+ %t0 = load i32, ptr %ptr
+ %t3 = load i32, ptr %arrayidx3
ret void
}
; Improve on existing dereferenceable attribute.
-define void @more_bytes(i32* dereferenceable(8) %ptr) {
+define void @more_bytes(ptr dereferenceable(8) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@more_bytes
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
; Improve on existing dereferenceable_or_null attribute.
-define void @more_bytes_and_not_null(i32* dereferenceable_or_null(8) %ptr) {
+define void @more_bytes_and_not_null(ptr dereferenceable_or_null(8) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@more_bytes_and_not_null
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
; But don't pessimize existing dereferenceable attribute.
-define void @better_bytes(i32* dereferenceable(100) %ptr) {
+define void @better_bytes(ptr dereferenceable(100) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@better_bytes
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(100) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(100) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
-define void @bitcast(i32* %arg) {
+define void @bitcast(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@bitcast
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- %t0 = load float, float* %arrayidx0
- %t1 = load float, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ %t0 = load float, ptr %arg
+ %t1 = load float, ptr %arrayidx1
ret void
}
-define void @bitcast_different_sizes(double* %arg1, i8* %arg2) {
+define void @bitcast_different_sizes(ptr %arg1, ptr %arg2) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@bitcast_different_sizes
-; CHECK-SAME: (double* nocapture nofree nonnull readnone align 4 dereferenceable(12) [[ARG1:%.*]], i8* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[ARG2:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(12) [[ARG1:%.*]], ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[ARG2:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr1 = bitcast double* %arg1 to float*
- %a10 = getelementptr float, float* %ptr1, i64 0
- %a11 = getelementptr float, float* %ptr1, i64 1
- %a12 = getelementptr float, float* %ptr1, i64 2
- %ld10 = load float, float* %a10
- %ld11 = load float, float* %a11
- %ld12 = load float, float* %a12
-
- %ptr2 = bitcast i8* %arg2 to i64*
- %a20 = getelementptr i64, i64* %ptr2, i64 0
- %a21 = getelementptr i64, i64* %ptr2, i64 1
- %ld20 = load i64, i64* %a20
- %ld21 = load i64, i64* %a21
+ %a11 = getelementptr float, ptr %arg1, i64 1
+ %a12 = getelementptr float, ptr %arg1, i64 2
+ %ld10 = load float, ptr %arg1
+ %ld11 = load float, ptr %a11
+ %ld12 = load float, ptr %a12
+
+ %a21 = getelementptr i64, ptr %arg2, i64 1
+ %ld20 = load i64, ptr %arg2
+ %ld21 = load i64, ptr %a21
ret void
}
-define void @negative_offset(i32* %arg) {
+define void @negative_offset(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@negative_offset
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 -1
- %t0 = load float, float* %arrayidx0
- %t1 = load float, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 -1
+ %t0 = load float, ptr %arg
+ %t1 = load float, ptr %arrayidx1
ret void
}
-define void @stores(i32* %arg) {
+define void @stores(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@stores
-; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4:[0-9]+]] {
-; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float*
-; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr float, float* [[PTR]], i64 0
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1
-; CHECK-NEXT: store float 1.000000e+00, float* [[ARRAYIDX0]], align 4
-; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4
+; CHECK-SAME: (ptr nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, ptr [[ARG]], i64 1
+; CHECK-NEXT: store float 1.000000e+00, ptr [[ARG]], align 4
+; CHECK-NEXT: store float 2.000000e+00, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- store float 1.0, float* %arrayidx0
- store float 2.0, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ store float 1.0, ptr %arg
+ store float 2.0, ptr %arrayidx1
ret void
}
-define void @load_store(i32* %arg) {
+define void @load_store(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@load_store
-; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float*
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1
-; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4
+; CHECK-SAME: (ptr nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, ptr [[ARG]], i64 1
+; CHECK-NEXT: store float 2.000000e+00, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- %t1 = load float, float* %arrayidx0
- store float 2.0, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ %t1 = load float, ptr %arg
+ store float 2.0, ptr %arrayidx1
ret void
}
-define void @different_size1(i32* %arg) {
+define void @different_size1(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@different_size1
-; CHECK-SAME: (i32* nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double*
-; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8
-; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: store double 0.000000e+00, ptr [[ARG]], align 8
+; CHECK-NEXT: store i32 0, ptr [[ARG]], align 8
; CHECK-NEXT: ret void
;
- %arg-cast = bitcast i32* %arg to double*
- store double 0.000000e+00, double* %arg-cast
- store i32 0, i32* %arg
+ store double 0.000000e+00, ptr %arg
+ store i32 0, ptr %arg
ret void
}
-define void @different_size2(i32* %arg) {
+define void @different_size2(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@different_size2
-; CHECK-SAME: (i32* nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8
-; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double*
-; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: store i32 0, ptr [[ARG]], align 8
+; CHECK-NEXT: store double 0.000000e+00, ptr [[ARG]], align 8
; CHECK-NEXT: ret void
;
- store i32 0, i32* %arg
- %arg-cast = bitcast i32* %arg to double*
- store double 0.000000e+00, double* %arg-cast
+ store i32 0, ptr %arg
+ store double 0.000000e+00, ptr %arg
ret void
}
; Therefore, %p must be dereferenced.
;
; ATTRIBUTOR_CGSCC_NPM-LABEL: define i32 @require_cfg_analysis(i32 %c, i32* {{.*}} dereferenceable(4) %p)
-define i32 @require_cfg_analysis(i32 %c, i32* %p) {
+define i32 @require_cfg_analysis(i32 %c, ptr %p) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@require_cfg_analysis
-; CHECK-SAME: (i32 [[C:%.*]], i32* nocapture nofree nonnull writeonly align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR4]] {
+; CHECK-SAME: (i32 [[C:%.*]], ptr nocapture nofree nonnull writeonly align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[TOBOOL1:%.*]] = icmp eq i32 [[C]], 0
; CHECK-NEXT: br i1 [[TOBOOL1]], label [[L1:%.*]], label [[L2:%.*]]
; CHECK: l1:
; CHECK-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[C]], 4
; CHECK-NEXT: br i1 [[TOBOOL4]], label [[L6:%.*]], label [[L7:%.*]]
; CHECK: l6:
-; CHECK-NEXT: store i32 0, i32* [[P]], align 4
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: l7:
-; CHECK-NEXT: store i32 1, i32* [[P]], align 4
+; CHECK-NEXT: store i32 1, ptr [[P]], align 4
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: ret i32 1
%tobool4 = icmp eq i32 %c, 4
br i1 %tobool4, label %l6, label %l7
l6:
- store i32 0, i32* %p
+ store i32 0, ptr %p
br label %end
l7:
- store i32 1, i32* %p
+ store i32 1, ptr %p
br label %end
end:
ret i32 1
; Determine dereference-ability before unused loads get deleted:
; https://bugs.llvm.org/show_bug.cgi?id=21780
-define <4 x double> @PR21780(double* %ptr) {
+define <4 x double> @PR21780(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780
-; CHECK-SAME: (double* nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 2
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 3
-; CHECK-NEXT: [[T0:%.*]] = load double, double* [[PTR]], align 8
-; CHECK-NEXT: [[T1:%.*]] = load double, double* [[ARRAYIDX1]], align 8
-; CHECK-NEXT: [[T2:%.*]] = load double, double* [[ARRAYIDX2]], align 8
-; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[PTR]], align 8
+; CHECK-NEXT: [[T1:%.*]] = load double, ptr [[ARRAYIDX1]], align 8
+; CHECK-NEXT: [[T2:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
+; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: [[VECINIT0:%.*]] = insertelement <4 x double> undef, double [[T0]], i32 0
; CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x double> [[VECINIT0]], double [[T1]], i32 1
; CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <4 x double> [[VECINIT1]], double [[T2]], i32 2
;
; GEP of index 0 is simplified away.
- %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 1
- %arrayidx2 = getelementptr inbounds double, double* %ptr, i64 2
- %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 3
+ %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr inbounds double, ptr %ptr, i64 2
+ %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 3
- %t0 = load double, double* %ptr, align 8
- %t1 = load double, double* %arrayidx1, align 8
- %t2 = load double, double* %arrayidx2, align 8
- %t3 = load double, double* %arrayidx3, align 8
+ %t0 = load double, ptr %ptr, align 8
+ %t1 = load double, ptr %arrayidx1, align 8
+ %t2 = load double, ptr %arrayidx2, align 8
+ %t3 = load double, ptr %arrayidx3, align 8
%vecinit0 = insertelement <4 x double> undef, double %t0, i32 0
%vecinit1 = insertelement <4 x double> %vecinit0, double %t1, i32 1
}
-define double @PR21780_only_access3_with_inbounds(double* %ptr) {
+define double @PR21780_only_access3_with_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_with_inbounds
-; CHECK-SAME: (double* nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 3
-; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-SAME: (ptr nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[PTR]], i64 3
+; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
- %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 3
+ %arrayidx3 = getelementptr inbounds double, ptr %ptr, i64 3
- %t3 = load double, double* %arrayidx3, align 8
+ %t3 = load double, ptr %arrayidx3, align 8
ret double %t3
}
-define double @PR21780_only_access3_without_inbounds(double* %ptr) {
+define double @PR21780_only_access3_without_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_without_inbounds
-; CHECK-SAME: (double* nocapture nofree readonly align 8 [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, double* [[PTR]], i64 3
-; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-SAME: (ptr nocapture nofree readonly align 8 [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, ptr [[PTR]], i64 3
+; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
- %arrayidx3 = getelementptr double, double* %ptr, i64 3
+ %arrayidx3 = getelementptr double, ptr %ptr, i64 3
- %t3 = load double, double* %arrayidx3, align 8
+ %t3 = load double, ptr %arrayidx3, align 8
ret double %t3
}
-define double @PR21780_without_inbounds(double* %ptr) {
+define double @PR21780_without_inbounds(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: read)
; CHECK-LABEL: define {{[^@]+}}@PR21780_without_inbounds
-; CHECK-SAME: (double* nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, double* [[PTR]], i64 3
-; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, ptr [[PTR]], i64 3
+; CHECK-NEXT: [[T3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT: ret double [[T3]]
;
- %arrayidx1 = getelementptr double, double* %ptr, i64 1
+ %arrayidx1 = getelementptr double, ptr %ptr, i64 1
- %arrayidx2 = getelementptr double, double* %ptr, i64 2
- %arrayidx3 = getelementptr double, double* %ptr, i64 3
+ %arrayidx2 = getelementptr double, ptr %ptr, i64 2
+ %arrayidx3 = getelementptr double, ptr %ptr, i64 3
- %t0 = load double, double* %ptr, align 8
- %t1 = load double, double* %arrayidx1, align 8
- %t2 = load double, double* %arrayidx2, align 8
- %t3 = load double, double* %arrayidx3, align 8
+ %t0 = load double, ptr %ptr, align 8
+ %t1 = load double, ptr %arrayidx1, align 8
+ %t2 = load double, ptr %arrayidx2, align 8
+ %t3 = load double, ptr %arrayidx3, align 8
ret double %t3
}
; Unsimplified, but still valid. Also, throw in some bogus arguments.
-define void @gep0(i8* %unused, i8* %other, i8* %ptr) {
+define void @gep0(ptr %unused, ptr %other, ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define {{[^@]+}}@gep0
-; CHECK-SAME: (i8* nocapture nofree readnone [[UNUSED:%.*]], i8* nocapture nofree noundef nonnull writeonly dereferenceable(1) [[OTHER:%.*]], i8* nocapture nofree nonnull readonly dereferenceable(3) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, i8* [[PTR]], i64 2
-; CHECK-NEXT: [[T2:%.*]] = load i8, i8* [[ARRAYIDX2]], align 1
-; CHECK-NEXT: store i8 [[T2]], i8* [[OTHER]], align 1
+; CHECK-SAME: (ptr nocapture nofree readnone [[UNUSED:%.*]], ptr nocapture nofree noundef nonnull writeonly dereferenceable(1) [[OTHER:%.*]], ptr nocapture nofree nonnull readonly dereferenceable(3) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, ptr [[PTR]], i64 2
+; CHECK-NEXT: [[T2:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: store i8 [[T2]], ptr [[OTHER]], align 1
; CHECK-NEXT: ret void
;
- %arrayidx0 = getelementptr i8, i8* %ptr, i64 0
+ %arrayidx0 = getelementptr i8, ptr %ptr, i64 0
- %arrayidx1 = getelementptr i8, i8* %ptr, i64 1
- %arrayidx2 = getelementptr i8, i8* %ptr, i64 2
- %t0 = load i8, i8* %arrayidx0
- %t1 = load i8, i8* %arrayidx1
- %t2 = load i8, i8* %arrayidx2
- store i8 %t2, i8* %other
+ %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
+ %t0 = load i8, ptr %arrayidx0
+ %t1 = load i8, ptr %arrayidx1
+ %t2 = load i8, ptr %arrayidx2
+ store i8 %t2, ptr %other
ret void
}
; Order of accesses does not change computation.
; Multiple arguments may be dereferenceable.
-define void @ordering(i8* %ptr1, i32* %ptr2) {
+define void @ordering(ptr %ptr1, ptr %ptr2) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@ordering
-; CHECK-SAME: (i8* nocapture nofree nonnull readnone dereferenceable(3) [[PTR1:%.*]], i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(3) [[PTR1:%.*]], ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: ret void
;
- %a20 = getelementptr i32, i32* %ptr2, i64 0
+ %a20 = getelementptr i32, ptr %ptr2, i64 0
- %a12 = getelementptr i8, i8* %ptr1, i64 2
- %t12 = load i8, i8* %a12
- %a11 = getelementptr i8, i8* %ptr1, i64 1
- %t20 = load i32, i32* %a20
- %a10 = getelementptr i8, i8* %ptr1, i64 0
- %t10 = load i8, i8* %a10
- %t11 = load i8, i8* %a11
- %a21 = getelementptr i32, i32* %ptr2, i64 1
- %t21 = load i32, i32* %a21
+ %a12 = getelementptr i8, ptr %ptr1, i64 2
+ %t12 = load i8, ptr %a12
+ %a11 = getelementptr i8, ptr %ptr1, i64 1
+ %t20 = load i32, ptr %a20
+ %t10 = load i8, ptr %ptr1
+ %t11 = load i8, ptr %a11
+ %a21 = getelementptr i32, ptr %ptr2, i64 1
+ %t21 = load i32, ptr %a21
ret void
}
; Not in entry block.
-define void @not_entry_but_guaranteed_to_execute(i8* %ptr) {
+define void @not_entry_but_guaranteed_to_execute(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_entry_but_guaranteed_to_execute
-; CHECK-SAME: (i8* nocapture nofree nonnull readnone dereferenceable(3) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(3) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: exit:
entry:
br label %exit
exit:
- %arrayidx0 = getelementptr i8, i8* %ptr, i64 0
- %arrayidx1 = getelementptr i8, i8* %ptr, i64 1
- %arrayidx2 = getelementptr i8, i8* %ptr, i64 2
- %t0 = load i8, i8* %arrayidx0
- %t1 = load i8, i8* %arrayidx1
- %t2 = load i8, i8* %arrayidx2
+ %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
+ %t0 = load i8, ptr %ptr
+ %t1 = load i8, ptr %arrayidx1
+ %t2 = load i8, ptr %arrayidx2
ret void
}
; Not in entry block and not guaranteed to execute.
-define void @not_entry_not_guaranteed_to_execute(i8* %ptr, i1 %cond) {
+define void @not_entry_not_guaranteed_to_execute(ptr %ptr, i1 %cond) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_entry_not_guaranteed_to_execute
-; CHECK-SAME: (i8* nocapture nofree readnone [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree readnone [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]]
; CHECK: loads:
entry:
br i1 %cond, label %loads, label %exit
loads:
- %arrayidx0 = getelementptr i8, i8* %ptr, i64 0
- %arrayidx1 = getelementptr i8, i8* %ptr, i64 1
- %arrayidx2 = getelementptr i8, i8* %ptr, i64 2
- %t0 = load i8, i8* %arrayidx0
- %t1 = load i8, i8* %arrayidx1
- %t2 = load i8, i8* %arrayidx2
+ %arrayidx1 = getelementptr i8, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
+ %t0 = load i8, ptr %ptr
+ %t1 = load i8, ptr %arrayidx1
+ %t2 = load i8, ptr %arrayidx2
ret void
exit:
ret void
-; The last load may not execute, so derefenceable bytes only covers the 1st two loads.
+; The last load may not execute, so dereferenceable bytes only covers the 1st two loads.
-define void @partial_in_entry(i16* %ptr, i1 %cond) {
+define void @partial_in_entry(ptr %ptr, i1 %cond) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@partial_in_entry
-; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(4) [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(4) [[PTR:%.*]], i1 noundef [[COND:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]]
; CHECK: loads:
; CHECK-NEXT: ret void
;
entry:
- %arrayidx0 = getelementptr i16, i16* %ptr, i64 0
- %arrayidx1 = getelementptr i16, i16* %ptr, i64 1
- %arrayidx2 = getelementptr i16, i16* %ptr, i64 2
- %t0 = load i16, i16* %arrayidx0
- %t1 = load i16, i16* %arrayidx1
+ %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
+ %t0 = load i16, ptr %ptr
+ %t1 = load i16, ptr %arrayidx1
br i1 %cond, label %loads, label %exit
loads:
- %t2 = load i16, i16* %arrayidx2
+ %t2 = load i16, ptr %arrayidx2
ret void
exit:
ret void
; The volatile load can't be used to prove a non-volatile access is allowed.
; The 2nd and 3rd loads may never execute.
-define void @volatile_is_not_dereferenceable(i16* %ptr) {
+define void @volatile_is_not_dereferenceable(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define {{[^@]+}}@volatile_is_not_dereferenceable
-; CHECK-SAME: (i16* nofree align 2 [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
-; CHECK-NEXT: [[T0:%.*]] = load volatile i16, i16* [[PTR]], align 2
+; CHECK-SAME: (ptr nofree align 2 [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: [[T0:%.*]] = load volatile i16, ptr [[PTR]], align 2
; CHECK-NEXT: ret void
;
- %arrayidx0 = getelementptr i16, i16* %ptr, i64 0
+ %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
- %arrayidx1 = getelementptr i16, i16* %ptr, i64 1
- %arrayidx2 = getelementptr i16, i16* %ptr, i64 2
- %t0 = load volatile i16, i16* %arrayidx0
- %t1 = load i16, i16* %arrayidx1
- %t2 = load i16, i16* %arrayidx2
+ %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
+ %t0 = load volatile i16, ptr %arrayidx0
+ %t1 = load i16, ptr %arrayidx1
+ %t2 = load i16, ptr %arrayidx2
ret void
}
; TODO: We should allow inference for atomic (but not volatile) ops.
-define void @atomic_is_alright(i16* %ptr) {
+define void @atomic_is_alright(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@atomic_is_alright
-; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(6) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(6) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx0 = getelementptr i16, i16* %ptr, i64 0
+ %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
- %arrayidx1 = getelementptr i16, i16* %ptr, i64 1
- %arrayidx2 = getelementptr i16, i16* %ptr, i64 2
- %t0 = load atomic i16, i16* %arrayidx0 unordered, align 2
- %t1 = load i16, i16* %arrayidx1
- %t2 = load i16, i16* %arrayidx2
+ %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
+ %t0 = load atomic i16, ptr %arrayidx0 unordered, align 2
+ %t1 = load i16, ptr %arrayidx1
+ %t2 = load i16, ptr %arrayidx2
ret void
}
declare void @may_not_return()
-define void @not_guaranteed_to_transfer_execution(i16* %ptr) {
+define void @not_guaranteed_to_transfer_execution(ptr %ptr) {
; CHECK-LABEL: define {{[^@]+}}@not_guaranteed_to_transfer_execution
-; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) {
; CHECK-NEXT: call void @may_not_return()
; CHECK-NEXT: ret void
;
- %arrayidx0 = getelementptr i16, i16* %ptr, i64 0
+ %arrayidx0 = getelementptr i16, ptr %ptr, i64 0
- %arrayidx1 = getelementptr i16, i16* %ptr, i64 1
- %arrayidx2 = getelementptr i16, i16* %ptr, i64 2
- %t0 = load i16, i16* %arrayidx0
+ %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
+ %t0 = load i16, ptr %arrayidx0
call void @may_not_return()
- %t1 = load i16, i16* %arrayidx1
- %t2 = load i16, i16* %arrayidx2
+ %t1 = load i16, ptr %arrayidx1
+ %t2 = load i16, ptr %arrayidx2
ret void
}
; We must have consecutive accesses.
-define void @variable_gep_index(i8* %unused, i8* %ptr, i64 %variable_index) {
+define void @variable_gep_index(ptr %unused, ptr %ptr, i64 %variable_index) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@variable_gep_index
-; CHECK-SAME: (i8* nocapture nofree readnone [[UNUSED:%.*]], i8* nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]], i64 [[VARIABLE_INDEX:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree readnone [[UNUSED:%.*]], ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]], i64 [[VARIABLE_INDEX:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx1 = getelementptr i8, i8* %ptr, i64 %variable_index
+ %arrayidx1 = getelementptr i8, ptr %ptr, i64 %variable_index
- %arrayidx2 = getelementptr i8, i8* %ptr, i64 2
- %t0 = load i8, i8* %ptr
- %t1 = load i8, i8* %arrayidx1
- %t2 = load i8, i8* %arrayidx2
+ %arrayidx2 = getelementptr i8, ptr %ptr, i64 2
+ %t0 = load i8, ptr %ptr
+ %t1 = load i8, ptr %arrayidx1
+ %t2 = load i8, ptr %arrayidx2
ret void
}
; Deal with >1 GEP index.
-define void @multi_index_gep(<4 x i8>* %ptr) {
+define void @multi_index_gep(ptr %ptr) {
; FIXME: %ptr should be dereferenceable(4)
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@multi_index_gep
-; CHECK-SAME: (<4 x i8>* nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx00 = getelementptr <4 x i8>, <4 x i8>* %ptr, i64 0, i64 0
+ %arrayidx00 = getelementptr <4 x i8>, ptr %ptr, i64 0, i64 0
- %t0 = load i8, i8* %arrayidx00
+ %t0 = load i8, ptr %arrayidx00
ret void
}
; Could round weird bitwidths down?
-define void @not_byte_multiple(i9* %ptr) {
+define void @not_byte_multiple(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@not_byte_multiple
-; CHECK-SAME: (i9* nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx0 = getelementptr i9, i9* %ptr, i64 0
+ %arrayidx0 = getelementptr i9, ptr %ptr, i64 0
- %t0 = load i9, i9* %arrayidx0
+ %t0 = load i9, ptr %arrayidx0
ret void
}
; Missing direct access from the pointer.
-define void @no_pointer_deref(i16* %ptr) {
+define void @no_pointer_deref(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@no_pointer_deref
-; CHECK-SAME: (i16* nocapture nofree readnone align 2 [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree readnone align 2 [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx1 = getelementptr i16, i16* %ptr, i64 1
+ %arrayidx1 = getelementptr i16, ptr %ptr, i64 1
- %arrayidx2 = getelementptr i16, i16* %ptr, i64 2
- %t1 = load i16, i16* %arrayidx1
- %t2 = load i16, i16* %arrayidx2
+ %arrayidx2 = getelementptr i16, ptr %ptr, i64 2
+ %t1 = load i16, ptr %arrayidx1
+ %t2 = load i16, ptr %arrayidx2
ret void
}
; Out-of-order is ok, but missing access concludes dereferenceable range.
+; Offsets 0, 1 and 3 are loaded; the gap at offset 2 stops the range at dereferenceable(8).
-define void @non_consecutive(i32* %ptr) {
+define void @non_consecutive(ptr %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@non_consecutive
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
- %t1 = load i32, i32* %arrayidx1
- %t0 = load i32, i32* %arrayidx0
- %t3 = load i32, i32* %arrayidx3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
+ %t1 = load i32, ptr %arrayidx1
+ %t0 = load i32, ptr %ptr
+ %t3 = load i32, ptr %arrayidx3
ret void
}
; Improve on existing dereferenceable attribute.
+; All four i32 slots are loaded, so the incoming dereferenceable(8) improves to (16).
-define void @more_bytes(i32* dereferenceable(8) %ptr) {
+define void @more_bytes(ptr dereferenceable(8) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@more_bytes
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
; Improve on existing dereferenceable_or_null attribute.
+; Direct dereferences also rule out null: _or_null(8) becomes nonnull dereferenceable(16).
-define void @more_bytes_and_not_null(i32* dereferenceable_or_null(8) %ptr) {
+define void @more_bytes_and_not_null(ptr dereferenceable_or_null(8) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@more_bytes_and_not_null
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
; But don't pessimize existing dereferenceable attribute.
+; Only 16 bytes are accessed, but the stronger incoming dereferenceable(100) must be kept.
-define void @better_bytes(i32* dereferenceable(100) %ptr) {
+define void @better_bytes(ptr dereferenceable(100) %ptr) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@better_bytes
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(100) [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(100) [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %arrayidx3 = getelementptr i32, i32* %ptr, i64 3
+ %arrayidx3 = getelementptr i32, ptr %ptr, i64 3
- %arrayidx1 = getelementptr i32, i32* %ptr, i64 1
- %arrayidx0 = getelementptr i32, i32* %ptr, i64 0
- %arrayidx2 = getelementptr i32, i32* %ptr, i64 2
- %t3 = load i32, i32* %arrayidx3
- %t1 = load i32, i32* %arrayidx1
- %t2 = load i32, i32* %arrayidx2
- %t0 = load i32, i32* %arrayidx0
+ %arrayidx1 = getelementptr i32, ptr %ptr, i64 1
+ %arrayidx2 = getelementptr i32, ptr %ptr, i64 2
+ %t3 = load i32, ptr %arrayidx3
+ %t1 = load i32, ptr %arrayidx1
+ %t2 = load i32, ptr %arrayidx2
+ %t0 = load i32, ptr %ptr
ret void
}
+; With opaque pointers the float bitcast is redundant; loads go straight through %arg.
+; Two adjacent float loads imply align 4 and dereferenceable(8).
-define void @bitcast(i32* %arg) {
+define void @bitcast(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@bitcast
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- %t0 = load float, float* %arrayidx0
- %t1 = load float, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ %t0 = load float, ptr %arg
+ %t1 = load float, ptr %arrayidx1
ret void
}
+; Mixed-size accesses: 3 floats on %arg1 => dereferenceable(12); 2 i64s on %arg2 => (16).
-define void @bitcast_different_sizes(double* %arg1, i8* %arg2) {
+define void @bitcast_different_sizes(ptr %arg1, ptr %arg2) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@bitcast_different_sizes
-; CHECK-SAME: (double* nocapture nofree nonnull readnone align 4 dereferenceable(12) [[ARG1:%.*]], i8* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[ARG2:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(12) [[ARG1:%.*]], ptr nocapture nofree nonnull readnone align 4 dereferenceable(16) [[ARG2:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr1 = bitcast double* %arg1 to float*
- %a10 = getelementptr float, float* %ptr1, i64 0
- %a11 = getelementptr float, float* %ptr1, i64 1
- %a12 = getelementptr float, float* %ptr1, i64 2
- %ld10 = load float, float* %a10
- %ld11 = load float, float* %a11
- %ld12 = load float, float* %a12
-
- %ptr2 = bitcast i8* %arg2 to i64*
- %a20 = getelementptr i64, i64* %ptr2, i64 0
- %a21 = getelementptr i64, i64* %ptr2, i64 1
- %ld20 = load i64, i64* %a20
- %ld21 = load i64, i64* %a21
+ %a11 = getelementptr float, ptr %arg1, i64 1
+ %a12 = getelementptr float, ptr %arg1, i64 2
+ %ld10 = load float, ptr %arg1
+ %ld11 = load float, ptr %a11
+ %ld12 = load float, ptr %a12
+
+ %a21 = getelementptr i64, ptr %arg2, i64 1
+ %ld20 = load i64, ptr %arg2
+ %ld21 = load i64, ptr %a21
ret void
}
+; The load at offset -1 must not extend the range; only dereferenceable(4) is implied.
-define void @negative_offset(i32* %arg) {
+define void @negative_offset(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(none)
; CHECK-LABEL: define {{[^@]+}}@negative_offset
-; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture nofree nonnull readnone align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 -1
- %t0 = load float, float* %arrayidx0
- %t1 = load float, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 -1
+ %t0 = load float, ptr %arg
+ %t1 = load float, ptr %arrayidx1
ret void
}
+; Stores (not only loads) imply dereferenceability: two floats => dereferenceable(8).
-define void @stores(i32* %arg) {
+define void @stores(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@stores
-; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4:[0-9]+]] {
-; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float*
-; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr float, float* [[PTR]], i64 0
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1
-; CHECK-NEXT: store float 1.000000e+00, float* [[ARRAYIDX0]], align 4
-; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4
+; CHECK-SAME: (ptr nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, ptr [[ARG]], i64 1
+; CHECK-NEXT: store float 1.000000e+00, ptr [[ARG]], align 4
+; CHECK-NEXT: store float 2.000000e+00, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- store float 1.0, float* %arrayidx0
- store float 2.0, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ store float 1.0, ptr %arg
+ store float 2.0, ptr %arrayidx1
ret void
}
+; A load and a store to adjacent slots still cover 8 consecutive bytes.
-define void @load_store(i32* %arg) {
+define void @load_store(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@load_store
-; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float*
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1
-; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4
+; CHECK-SAME: (ptr nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, ptr [[ARG]], i64 1
+; CHECK-NEXT: store float 2.000000e+00, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
- %ptr = bitcast i32* %arg to float*
- %arrayidx0 = getelementptr float, float* %ptr, i64 0
- %arrayidx1 = getelementptr float, float* %ptr, i64 1
- %t1 = load float, float* %arrayidx0
- store float 2.0, float* %arrayidx1
+ %arrayidx1 = getelementptr float, ptr %arg, i64 1
+ %t1 = load float, ptr %arg
+ store float 2.0, ptr %arrayidx1
ret void
}
+; Overlapping i32/double stores: the larger access yields align 8, dereferenceable(8).
-define void @different_size1(i32* %arg) {
+define void @different_size1(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@different_size1
-; CHECK-SAME: (i32* nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double*
-; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8
-; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: store double 0.000000e+00, ptr [[ARG]], align 8
+; CHECK-NEXT: store i32 0, ptr [[ARG]], align 8
; CHECK-NEXT: ret void
;
- %arg-cast = bitcast i32* %arg to double*
- store double 0.000000e+00, double* %arg-cast
- store i32 0, i32* %arg
+ store double 0.000000e+00, ptr %arg
+ store i32 0, ptr %arg
ret void
}
+; Same as different_size1 but with the store order reversed.
-define void @different_size2(i32* %arg) {
+define void @different_size2(ptr %arg) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@different_size2
-; CHECK-SAME: (i32* nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
-; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8
-; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double*
-; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: store i32 0, ptr [[ARG]], align 8
+; CHECK-NEXT: store double 0.000000e+00, ptr [[ARG]], align 8
; CHECK-NEXT: ret void
;
- store i32 0, i32* %arg
+ store i32 0, ptr %arg
- %arg-cast = bitcast i32* %arg to double*
- store double 0.000000e+00, double* %arg-cast
+ store double 0.000000e+00, ptr %arg
ret void
}
; Therefore, %p must be dereferenced.
;
-; ATTRIBUTOR_CGSCC_NPM-LABEL: define i32 @require_cfg_analysis(i32 %c, i32* {{.*}} dereferenceable(4) %p)
+; ATTRIBUTOR_CGSCC_NPM-LABEL: define i32 @require_cfg_analysis(i32 %c, ptr {{.*}} dereferenceable(4) %p)
-define i32 @require_cfg_analysis(i32 %c, i32* %p) {
+define i32 @require_cfg_analysis(i32 %c, ptr %p) {
; CHECK: Function Attrs: nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CHECK-LABEL: define {{[^@]+}}@require_cfg_analysis
-; CHECK-SAME: (i32 [[C:%.*]], i32* nocapture nofree nonnull writeonly align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR4]] {
+; CHECK-SAME: (i32 [[C:%.*]], ptr nocapture nofree nonnull writeonly align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[TOBOOL1:%.*]] = icmp eq i32 [[C]], 0
; CHECK-NEXT: br i1 [[TOBOOL1]], label [[L1:%.*]], label [[L2:%.*]]
; CHECK: l1:
; CHECK-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[C]], 4
; CHECK-NEXT: br i1 [[TOBOOL4]], label [[L6:%.*]], label [[L7:%.*]]
; CHECK: l6:
-; CHECK-NEXT: store i32 0, i32* [[P]], align 4
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: l7:
-; CHECK-NEXT: store i32 1, i32* [[P]], align 4
+; CHECK-NEXT: store i32 1, ptr [[P]], align 4
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: ret i32 1
%tobool4 = icmp eq i32 %c, 4
br i1 %tobool4, label %l6, label %l7
l6:
- store i32 0, i32* %p
+ store i32 0, ptr %p
br label %end
l7:
- store i32 1, i32* %p
+ store i32 1, ptr %p
br label %end
end:
ret i32 1
; Instructions before a call that will be pushed to its predecessors
; with uses after the callsite, must be patched up as PHI nodes in
; the join block.
+; The gep before the call is cloned into each split block and rejoined by a phi.
-define i32* @test_split_branch_phi(i32* %ptrarg, i32 %i) {
+define ptr @test_split_branch_phi(ptr %ptrarg, i32 %i) {
Header:
- %tobool = icmp ne i32* %ptrarg, null
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB: ; preds = %Header
- %arrayidx = getelementptr inbounds i32, i32* %ptrarg, i64 42
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ptrarg, i64 42
+ %0 = load i32, ptr %arrayidx, align 4
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %CallSite, label %End
CallSite: ; preds = %TBB, %Header
- %somepointer = getelementptr i32, i32* %ptrarg, i64 18
- call void @bar(i32* %ptrarg, i32 %i)
+ %somepointer = getelementptr i32, ptr %ptrarg, i64 18
+ call void @bar(ptr %ptrarg, i32 %i)
br label %End
End: ; preds = %CallSite, %TBB
- %somepointerphi = phi i32* [ %somepointer, %CallSite ], [ null, %TBB ]
- ret i32* %somepointerphi
+ %somepointerphi = phi ptr [ %somepointer, %CallSite ], [ null, %TBB ]
+ ret ptr %somepointerphi
}
; NODUP-LABEL: test_split_branch_phi
; NODUP-NOT: split
; CHECK-LABEL: Header.split
-; CHECK: %[[V1:somepointer[0-9]+]] = getelementptr i32, i32* %ptrarg, i64 18
-; CHECK: call void @bar(i32* null, i32 %i)
+; CHECK: %[[V1:somepointer[0-9]+]] = getelementptr i32, ptr %ptrarg, i64 18
+; CHECK: call void @bar(ptr null, i32 %i)
; CHECK: br label %CallSite
; CHECK-LABEL: TBB.split:
-; CHECK: %[[V2:somepointer[0-9]+]] = getelementptr i32, i32* %ptrarg, i64 18
-; CHECK: call void @bar(i32* nonnull %ptrarg, i32 %i)
+; CHECK: %[[V2:somepointer[0-9]+]] = getelementptr i32, ptr %ptrarg, i64 18
+; CHECK: call void @bar(ptr nonnull %ptrarg, i32 %i)
; CHECK: br label %CallSite
; CHECK: CallSite:
-; CHECK: phi i32* [ %[[V1]], %Header.split ], [ %[[V2]], %TBB.split ]
+; CHECK: phi ptr [ %[[V1]], %Header.split ], [ %[[V2]], %TBB.split ]
+; No phi is needed after splitting because %i.add has no use past the call.
-define void @split_branch_no_extra_phi(i32* %ptrarg, i32 %i) {
+define void @split_branch_no_extra_phi(ptr %ptrarg, i32 %i) {
Header:
- %tobool = icmp ne i32* %ptrarg, null
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB: ; preds = %Header
- %arrayidx = getelementptr inbounds i32, i32* %ptrarg, i64 42
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ptrarg, i64 42
+ %0 = load i32, ptr %arrayidx, align 4
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %CallSite, label %End
CallSite: ; preds = %TBB, %Header
%i.add = add i32 %i, 99
- call void @bar(i32* %ptrarg, i32 %i.add)
+ call void @bar(ptr %ptrarg, i32 %i.add)
br label %End
End: ; preds = %CallSite, %TBB
; CHECK-LABEL: split_branch_no_extra_phi
; CHECK-LABEL: Header.split
; CHECK: %[[V1:.+]] = add i32 %i, 99
-; CHECK: call void @bar(i32* null, i32 %[[V1]])
+; CHECK: call void @bar(ptr null, i32 %[[V1]])
; CHECK: br label %CallSite
; CHECK-LABEL: TBB.split:
; CHECK: %[[V2:.+]] = add i32 %i, 99
-; CHECK: call void @bar(i32* nonnull %ptrarg, i32 %[[V2]])
+; CHECK: call void @bar(ptr nonnull %ptrarg, i32 %[[V2]])
; CHECK: br label %CallSite
; CHECK: CallSite:
; CHECK-NOT: phi
; In this test case, the codesize cost of the instructions before the call to
; bar() is equal to the default DuplicationThreshold of 5, because calls are
; more expensive.
+; Expect no splitting: the call stays in CallSite unmodified.
-define void @test_no_split_threshold(i32* %ptrarg, i32 %i) {
+define void @test_no_split_threshold(ptr %ptrarg, i32 %i) {
Header:
- %tobool = icmp ne i32* %ptrarg, null
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB: ; preds = %Header
- %arrayidx = getelementptr inbounds i32, i32* %ptrarg, i64 42
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ptrarg, i64 42
+ %0 = load i32, ptr %arrayidx, align 4
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %CallSite, label %End
%i2 = add i32 %i, 10
call void @bari(i32 %i2)
call void @bari(i32 %i2)
- call void @bar(i32* %ptrarg, i32 %i2)
+ call void @bar(ptr %ptrarg, i32 %i2)
br label %End
End: ; preds = %CallSite, %TBB
; CHECK-LABEL: test_no_split_threshold
; CHECK-NOT: split
; CHECK-LABEL: CallSite:
-; CHECK: call void @bar(i32* %ptrarg, i32 %i2)
+; CHECK: call void @bar(ptr %ptrarg, i32 %i2)
; In this test case, the phi node %l in CallSite should be removed, as after
; moving the call to the split blocks we can use the values directly.
+; Each split call takes %l1/%l2 directly, so no phi survives in CallSite.
-define void @test_remove_unused_phi(i32* %ptrarg, i32 %i) {
+define void @test_remove_unused_phi(ptr %ptrarg, i32 %i) {
Header:
- %l1 = load i32, i32* undef, align 16
- %tobool = icmp ne i32* %ptrarg, null
+ %l1 = load i32, ptr undef, align 16
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB: ; preds = %Header
- %arrayidx = getelementptr inbounds i32, i32* %ptrarg, i64 42
- %0 = load i32, i32* %arrayidx, align 4
- %l2 = load i32, i32* undef, align 16
+ %arrayidx = getelementptr inbounds i32, ptr %ptrarg, i64 42
+ %0 = load i32, ptr %arrayidx, align 4
+ %l2 = load i32, ptr undef, align 16
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %CallSite, label %End
CallSite: ; preds = %TBB, %Header
%l = phi i32 [ %l1, %Header ], [ %l2, %TBB ]
- call void @bar(i32* %ptrarg, i32 %l)
+ call void @bar(ptr %ptrarg, i32 %l)
br label %End
End: ; preds = %CallSite, %TBB
; NODUP-NOT: split
; CHECK-LABEL: test_remove_unused_phi
; CHECK-LABEL: Header.split
-; CHECK: call void @bar(i32* null, i32 %l1)
+; CHECK: call void @bar(ptr null, i32 %l1)
; CHECK: br label %CallSite
; CHECK-LABEL: TBB.split:
-; CHECK: call void @bar(i32* nonnull %ptrarg, i32 %l2)
+; CHECK: call void @bar(ptr nonnull %ptrarg, i32 %l2)
; CHECK: br label %CallSite
; CHECK-LABEL: CallSite:
; CHECK-NOT: phi
; In this test case, we need to insert a new PHI node in TailBB to combine
; the loads we moved to the predecessors.
+; The hoisted loads are re-merged by a new phi feeding %sub after the call.
-define void @test_add_new_phi(i32* %ptrarg, i32 %i) {
+define void @test_add_new_phi(ptr %ptrarg, i32 %i) {
Header:
- %tobool = icmp ne i32* %ptrarg, null
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB:
br i1 undef, label %CallSite, label %End
CallSite:
- %arrayidx112 = getelementptr inbounds i32, i32* undef, i64 1
- %0 = load i32, i32* %arrayidx112, align 4
- call void @bar(i32* %ptrarg, i32 %i)
+ %arrayidx112 = getelementptr inbounds i32, ptr undef, i64 1
+ %0 = load i32, ptr %arrayidx112, align 4
+ call void @bar(ptr %ptrarg, i32 %i)
%sub = sub nsw i32 %0, undef
br label %End
; NODUP-NOT: split
; CHECK-LABEL: test_add_new_phi
; CHECK-LABEL: Header.split
-; CHECK: %[[V1:.+]] = load i32, i32*
-; CHECK: call void @bar(i32* null, i32 %i)
+; CHECK: %[[V1:.+]] = load i32, ptr
+; CHECK: call void @bar(ptr null, i32 %i)
; CHECK: br label %CallSite
; CHECK-LABEL: TBB.split:
-; CHECK: %[[V2:.+]] = load i32, i32*
-; CHECK: call void @bar(i32* nonnull %ptrarg, i32 %i)
+; CHECK: %[[V2:.+]] = load i32, ptr
+; CHECK: call void @bar(ptr nonnull %ptrarg, i32 %i)
; CHECK: br label %CallSite
; CHECK-LABEL: CallSite:
; CHECK-NEXT: %[[V3:.+]] = phi i32 [ %[[V1]], %Header.split ], [ %[[V2]], %TBB.split ]
; CHECK: %sub = sub nsw i32 %[[V3]], undef
+; The store before the call is duplicated into both split blocks.
-define i32 @test_firstnophi(i32* %a, i32 %v) {
+define i32 @test_firstnophi(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- store i32 %v, i32* %a
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ store i32 %v, ptr %a
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
; NODUP-NOT: split:
; CHECK-LABEL: @test_firstnophi
; CHECK-LABEL: Header.split:
-; CHECK-NEXT: store i32 %v, i32* %a
-; CHECK-NEXT: %[[CALL1:.*]] = call i32 @callee(i32* null, i32 %v, i32 1)
+; CHECK-NEXT: store i32 %v, ptr %a
+; CHECK-NEXT: %[[CALL1:.*]] = call i32 @callee(ptr null, i32 %v, i32 1)
; CHECK-NEXT: br label %Tail
; CHECK-LABEL: TBB.split:
-; CHECK-NEXT: store i32 %v, i32* %a
-; CHECK-NEXT: %[[CALL2:.*]] = call i32 @callee(i32* nonnull %a, i32 1, i32 2)
+; CHECK-NEXT: store i32 %v, ptr %a
+; CHECK-NEXT: %[[CALL2:.*]] = call i32 @callee(ptr nonnull %a, i32 1, i32 2)
; CHECK-NEXT: br label %Tail
; CHECK-LABEL: Tail:
; CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
; CHECK: ret i32 %[[MERGED]]
+; Trivial callee stub used as the split-site call target; always returns 0.
-define i32 @callee(i32* %a, i32 %v, i32 %p) {
+define i32 @callee(ptr %a, i32 %v, i32 %p) {
ret i32 0
}
+; %l is still used by @bari after the split call, so the phi must stay.
-define void @test_no_remove_used_phi(i32* %ptrarg, i32 %i) {
+define void @test_no_remove_used_phi(ptr %ptrarg, i32 %i) {
Header:
- %l1 = load i32, i32* undef, align 16
- %tobool = icmp ne i32* %ptrarg, null
+ %l1 = load i32, ptr undef, align 16
+ %tobool = icmp ne ptr %ptrarg, null
br i1 %tobool, label %TBB, label %CallSite
TBB: ; preds = %Header
- %arrayidx = getelementptr inbounds i32, i32* %ptrarg, i64 42
- %0 = load i32, i32* %arrayidx, align 4
- %l2 = load i32, i32* undef, align 16
+ %arrayidx = getelementptr inbounds i32, ptr %ptrarg, i64 42
+ %0 = load i32, ptr %arrayidx, align 4
+ %l2 = load i32, ptr undef, align 16
%tobool1 = icmp ne i32 %0, 0
br i1 %tobool1, label %CallSite, label %End
CallSite: ; preds = %TBB, %Header
%l = phi i32 [ %l1, %Header ], [ %l2, %TBB ]
- call void @bar(i32* %ptrarg, i32 %l)
+ call void @bar(ptr %ptrarg, i32 %l)
call void @bari(i32 %l)
br label %End
; NODUP-NOT: split
; CHECK-LABEL: @test_no_remove_used_phi
; CHECK-LABEL: Header.split:
-; CHECK: call void @bar(i32* null, i32 %l1)
+; CHECK: call void @bar(ptr null, i32 %l1)
; CHECK-NEXT: br label %CallSite
; CHECK-LABEL: TBB.split:
-; CHECK: call void @bar(i32* nonnull %ptrarg, i32 %l2)
+; CHECK: call void @bar(ptr nonnull %ptrarg, i32 %l2)
; CHECK-NEXT: br label %CallSite
; CHECK-LABEL: CallSite:
; CHECK-NEXT: %l = phi i32 [ %l1, %Header.split ], [ %l2, %TBB.split ]
; CHECK: call void @bari(i32 %l)
+; No-op callee used as the call target in the tests above.
-define void @bar(i32*, i32) {
+define void @bar(ptr, i32) {
ret void
}
; CHECK-LABEL: Header:
; CHECK-NEXT: br i1 undef, label %Header.split
; CHECK-LABEL: Header.split:
-; CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 %p)
+; CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 %p)
; CHECK-LABEL: TBB:
; CHECK: br i1 %cmp, label %TBB.split
; CHECK-LABEL: TBB.split:
-; CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 %v, i32 %p)
+; CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 %v, i32 %p)
; CHECK-LABEL: Tail
; CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
; CHECK: ret i32 %[[MERGED]]
+; Header places no condition on %a; only the TBB path lets the split call pass null.
-define i32 @test_simple(i32* %a, i32 %v, i32 %p) {
+define i32 @test_simple(ptr %a, i32 %v, i32 %p) {
Header:
br i1 undef, label %Tail, label %End
TBB:
- %cmp = icmp eq i32* %a, null
+ %cmp = icmp eq ptr %a, null
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
; CHECK-LABEL: Header:
; CHECK: br i1 %tobool1, label %TBB1, label %Header.split
; CHECK-LABEL: Header.split:
-; CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 %p)
+; CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 %p)
; CHECK-LABEL: TBB2:
; CHECK: br i1 %cmp2, label %TBB2.split, label %End
; CHECK-LABEL: TBB2.split:
-; CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 1, i32 99)
+; CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 1, i32 99)
; CHECK-LABEL: Tail
; CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB2.split ]
; CHECK: ret i32 %[[MERGED]]
+; On the TBB1 path the eq-null condition holds, so that split call receives null.
-define i32 @test_eq_eq_eq_untaken2(i32* %a, i32 %v, i32 %p) {
+define i32 @test_eq_eq_eq_untaken2(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %TBB1, label %Tail
TBB1:
br i1 %cmp2, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
; CHECK-LABEL: Header:
; CHECK: br i1 %tobool1, label %TBB1, label %Header.split
; CHECK-LABEL: Header.split:
-; CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 %p)
+; CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 %p)
; CHECK-LABEL: TBB2:
; CHECK: br i1 %cmp2, label %TBB2.split, label %End
; CHECK-LABEL: TBB2.split:
-; CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 %v, i32 99)
+; CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 %v, i32 99)
; CHECK-LABEL: Tail
; CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB2.split ]
; CHECK: ret i32 %[[MERGED]]
+; The TBB1 path implies %a == null, so the corresponding split call receives null.
-define i32 @test_eq_ne_eq_untaken(i32* %a, i32 %v, i32 %p) {
+define i32 @test_eq_ne_eq_untaken(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %TBB1, label %Tail
TBB1:
br i1 %cmp2, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
; CHECK: Header2:
; CHECK:br i1 %tobool2, label %Header2.split, label %TBB1
; CHECK-LABEL: Header2.split:
-; CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 10)
+; CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 10)
; CHECK-LABEL: TBB2:
; CHECK: br i1 %cmp2, label %TBB2.split, label %End
; CHECK-LABEL: TBB2.split:
; NOTE: CallSiteSplitting cannot infer that %a is null here, as it currently
; only supports recording conditions along a single predecessor path.
-; CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 1, i32 99)
+; CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 1, i32 99)
; CHECK-LABEL: Tail
; CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB2.split ]
; CHECK: ret i32 %[[MERGED]]
+; Only the Header2 path constrains %a (nonnull); see the NOTE above for the other path.
-define i32 @test_header_header2_tbb(i32* %a, i32 %v, i32 %p) {
+define i32 @test_header_header2_tbb(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %TBB1, label %Header2
Header2:
br i1 %cmp2, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
ret i32 %v
}
+; Callee stub; always returns 10.
-define i32 @callee(i32* %a, i32 %v, i32 %p) {
+define i32 @callee(ptr %a, i32 %v, i32 %p) {
ret i32 10
}
; RUN: opt < %s -passes=callsite-splitting -S | FileCheck %s
; RUN: opt < %s -passes='function(callsite-splitting)' -S | FileCheck %s
+; Callee stub; always returns 10.
-define i32 @callee(i32*, i32, i32) {
+define i32 @callee(ptr, i32, i32) {
ret i32 10
}
; CHECK-LABEL: @test_preds_equal
; CHECK-NOT: split
; CHECK: br i1 %cmp, label %Tail, label %Tail
+; Both branch targets are the same block, so no splitting is possible.
-define i32 @test_preds_equal(i32* %a, i32 %v, i32 %p) {
+define i32 @test_preds_equal(ptr %a, i32 %v, i32 %p) {
TBB:
- %cmp = icmp eq i32* %a, null
+ %cmp = icmp eq ptr %a, null
br i1 %cmp, label %Tail, label %Tail
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
}
;CHECK-LABEL: Header:
;CHECK: br i1 %tobool1, label %Header.split, label %TBB
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* null, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr null, i32 %v, i32 1)
;CHECK-LABEL: TBB:
;CHECK: br i1 %cmp, label %TBB.split, label %End
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* nonnull %a, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr nonnull %a, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
+; Header implies %a == null and the TBB path nonnull; each split call is specialized.
-define i32 @test_eq_eq(i32* %a, i32 %v) {
+define i32 @test_eq_eq(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_eq_eq_eq
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 10)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 10)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 1, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 1, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
+; The expectations above pin the constant arguments each split call receives.
-define i32 @test_eq_eq_eq(i32* %a, i32 %v, i32 %p) {
+define i32 @test_eq_eq_eq(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %Header2, label %End
Header2:
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_eq_eq_eq_constrain_same_i32_arg
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 222, i32 %p)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 222, i32 %p)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 333, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 333, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
+; Both splits constrain the same i32 argument (%v becomes 222 vs 333).
-define i32 @test_eq_eq_eq_constrain_same_i32_arg(i32* %a, i32 %v, i32 %p) {
+define i32 @test_eq_eq_eq_constrain_same_i32_arg(ptr %a, i32 %v, i32 %p) {
Header:
%tobool1 = icmp eq i32 %v, 111
br i1 %tobool1, label %Header2, label %End
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_eq
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
+; The ne-null compare makes %a nonnull on the Header path and null on the TBB path.
-define i32 @test_ne_eq(i32* %a, i32 %v) {
+define i32 @test_ne_eq(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_eq_ne
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 10)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 10)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
+; Only the Header2 path gets nonnull; the TBB path leaves %a unconstrained.
-define i32 @test_ne_eq_ne(i32* %a, i32 %v, i32 %p) {
+define i32 @test_ne_eq_ne(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %Header2, label %TBB
Header2:
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_ne
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 %v, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 %v, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_ne_ne(i32* %a, i32 %v) {
+define i32 @test_ne_ne(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_ne_ne_constrain_same_pointer_arg
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 %p)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 %p)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_ne_ne_ne_constrain_same_pointer_arg(i32* %a, i32 %v, i32 %p, i32* %a2, i32* %a3) {
+define i32 @test_ne_ne_ne_constrain_same_pointer_arg(ptr %a, i32 %v, i32 %p, ptr %a2, ptr %a3) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %Header2, label %TBB
Header2:
- %tobool2 = icmp ne i32* %a, %a2
+ %tobool2 = icmp ne ptr %a, %a2
br i1 %tobool2, label %Tail, label %TBB
TBB:
- %cmp = icmp ne i32* %a, %a3
+ %cmp = icmp ne ptr %a, %a3
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_eq_eq_untaken
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* null, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr null, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_eq_eq_untaken(i32* %a, i32 %v) {
+define i32 @test_eq_eq_untaken(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %TBB, label %Tail
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_eq_eq_eq_untaken
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* nonnull %a, i32 %v, i32 10)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr nonnull %a, i32 %v, i32 10)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 1, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 1, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_eq_eq_eq_untaken(i32* %a, i32 %v, i32 %p) {
+define i32 @test_eq_eq_eq_untaken(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %TBB, label %Header2
Header2:
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_eq_untaken
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* null, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr null, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* nonnull %a, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr nonnull %a, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_ne_eq_untaken(i32* %a, i32 %v) {
+define i32 @test_ne_eq_untaken(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %TBB, label %Tail
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_eq_ne_untaken
;CHECK-LABEL: Header2.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* null, i32 %v, i32 10)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr null, i32 %v, i32 10)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header2.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_ne_eq_ne_untaken(i32* %a, i32 %v, i32 %p) {
+define i32 @test_ne_eq_ne_untaken(ptr %a, i32 %v, i32 %p) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %TBB, label %Header2
Header2:
br i1 %cmp, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_ne_ne_untaken
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* null, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr null, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* nonnull %a, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr nonnull %a, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_ne_ne_untaken(i32* %a, i32 %v) {
+define i32 @test_ne_ne_untaken(ptr %a, i32 %v) {
Header:
- %tobool1 = icmp ne i32* %a, null
+ %tobool1 = icmp ne ptr %a, null
br i1 %tobool1, label %TBB, label %Tail
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_nonconst_const_phi
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 1, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 1, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_nonconst_const_phi(i32* %a, i32* %b, i32 %v) {
+define i32 @test_nonconst_const_phi(ptr %a, ptr %b, i32 %v) {
Header:
- %tobool1 = icmp eq i32* %a, %b
+ %tobool1 = icmp eq ptr %a, %b
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_nonconst_nonconst_phi
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 1)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 %v, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 %v, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL2]], %TBB.split ], [ %[[CALL1]], %Header.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_nonconst_nonconst_phi(i32* %a, i32* %b, i32 %v, i32 %v2) {
+define i32 @test_nonconst_nonconst_phi(ptr %a, ptr %b, i32 %v, i32 %v2) {
Header:
- %tobool1 = icmp eq i32* %a, %b
+ %tobool1 = icmp eq ptr %a, %b
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-LABEL: @test_cfg_no_or_phi
;CHECK-LABEL: TBB0.split
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 1)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 1)
;CHECK-LABEL: TBB1.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 %v, i32 2)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 %v, i32 2)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL2]], %TBB1.split ], [ %[[CALL1]], %TBB0.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_cfg_no_or_phi(i32* %a, i32 %v) {
+define i32 @test_cfg_no_or_phi(ptr %a, i32 %v) {
entry:
br i1 undef, label %TBB0, label %TBB1
TBB0:
br i1 undef, label %Tail, label %End
Tail:
%p = phi i32[1,%TBB0], [2, %TBB1]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
ret i32 %v
;CHECK-NOT: Header.split:
;CHECK-NOT: TBB.split:
;CHECK-LABEL: Tail:
-;CHECK: %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK: ret i32 %r
-define i32 @test_nonconst_nonconst_phi_noncost(i32* %a, i32* %b, i32 %v, i32 %v2) {
+define i32 @test_nonconst_nonconst_phi_noncost(ptr %a, ptr %b, i32 %v, i32 %v2) {
Header:
- %tobool1 = icmp eq i32* %a, %b
+ %tobool1 = icmp eq ptr %a, %b
br i1 %tobool1, label %Tail, label %TBB
TBB:
Tail:
%p = phi i32[%v,%Header], [%v2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-NOT: Header.split:
;CHECK-NOT: TBB.split:
;CHECK-LABEL: Tail:
-;CHECK: %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK: ret i32 %r
-define i32 @test_3preds_constphi(i32* %a, i32 %v, i1 %c1, i1 %c2, i1 %c3) {
+define i32 @test_3preds_constphi(ptr %a, i32 %v, i1 %c1, i1 %c2, i1 %c3) {
Header:
br i1 %c1, label %Tail, label %TBB1
Tail:
%p = phi i32[1,%Header], [2, %TBB1], [3, %TBB2]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-NOT: Header.split:
;CHECK-NOT: TBB.split:
;CHECK-LABEL: Tail:
-;CHECK: %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+;CHECK: %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
;CHECK: ret i32 %r
-define i32 @test_indirectbr_phi(i8* %address, i32* %a, i32* %b, i32 %v) {
+define i32 @test_indirectbr_phi(ptr %address, ptr %a, ptr %b, i32 %v) {
Header:
- %indirect.goto.dest = select i1 undef, i8* blockaddress(@test_indirectbr_phi, %End), i8* %address
- indirectbr i8* %indirect.goto.dest, [label %TBB, label %Tail]
+ %indirect.goto.dest = select i1 undef, ptr blockaddress(@test_indirectbr_phi, %End), ptr %address
+ indirectbr ptr %indirect.goto.dest, [label %TBB, label %Tail]
TBB:
- %indirect.goto.dest2 = select i1 undef, i8* blockaddress(@test_indirectbr_phi, %End), i8* %address
- indirectbr i8* %indirect.goto.dest2, [label %Tail, label %End]
+ %indirect.goto.dest2 = select i1 undef, ptr blockaddress(@test_indirectbr_phi, %End), ptr %address
+ indirectbr ptr %indirect.goto.dest2, [label %Tail, label %End]
Tail:
%p = phi i32[1,%Header], [2, %TBB]
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
;CHECK-NOT: Header.split:
;CHECK-NOT: TBB.split:
;CHECK-LABEL: Tail:
-;CHECK: %r = call i32 @callee(i32* %a, i32 %v, i32 0)
+;CHECK: %r = call i32 @callee(ptr %a, i32 %v, i32 0)
;CHECK: ret i32 %r
-define i32 @test_cond_no_effect(i32* %a, i32 %v) {
+define i32 @test_cond_no_effect(ptr %a, i32 %v) {
Entry:
- %tobool1 = icmp eq i32* %a, null
+ %tobool1 = icmp eq ptr %a, null
br i1 %tobool1, label %Header, label %End
Header:
br i1 undef, label %Tail, label %End
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 0)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 0)
ret i32 %r
End:
;CHECK-LABEL: @test_unreachable
;CHECK-LABEL: Header.split:
-;CHECK: %[[CALL1:.*]] = call i32 @callee(i32* %a, i32 %v, i32 10)
+;CHECK: %[[CALL1:.*]] = call i32 @callee(ptr %a, i32 %v, i32 10)
;CHECK-LABEL: TBB.split:
-;CHECK: %[[CALL2:.*]] = call i32 @callee(i32* %a, i32 1, i32 %p)
+;CHECK: %[[CALL2:.*]] = call i32 @callee(ptr %a, i32 1, i32 %p)
;CHECK-LABEL: Tail
;CHECK: %[[MERGED:.*]] = phi i32 [ %[[CALL1]], %Header.split ], [ %[[CALL2]], %TBB.split ]
;CHECK: ret i32 %[[MERGED]]
-define i32 @test_unreachable(i32* %a, i32 %v, i32 %p) {
+define i32 @test_unreachable(ptr %a, i32 %v, i32 %p) {
Entry:
br label %End
Header:
%cmp = icmp eq i32 %v, 1
br i1 %cmp, label %Tail, label %Header
Tail:
- %r = call i32 @callee(i32* %a, i32 %v, i32 %p)
+ %r = call i32 @callee(ptr %a, i32 %v, i32 %p)
ret i32 %r
End:
ret i32 %v
}
-define i32 @callee(i32* %a, i32 %v, i32 %p) {
+define i32 @callee(ptr %a, i32 %v, i32 %p) {
entry:
- %c = icmp ne i32* %a, null
+ %c = icmp ne ptr %a, null
br i1 %c, label %BB1, label %BB2
BB1:
- call void @dummy(i32* %a, i32 %p)
+ call void @dummy(ptr %a, i32 %p)
br label %End
BB2:
ret i32 %p
}
-declare void @dummy(i32*, i32)
+declare void @dummy(ptr, i32)
declare void @dummy2(i32, i32)
; Make sure we remove the non-nullness on constant paramater.
;
;CHECK-LABEL: @caller2
;CHECK-LABEL: Top1.split:
-;CHECK: call i32 @callee(i32* inttoptr (i64 4643 to i32*)
-define void @caller2(i32 %c, i32* %a_elt, i32* %b_elt) {
+;CHECK: call i32 @callee(ptr inttoptr (i64 4643 to ptr)
+define void @caller2(i32 %c, ptr %a_elt, ptr %b_elt) {
entry:
br label %Top0
Top0:
- %tobool1 = icmp eq i32* %a_elt, inttoptr (i64 4643 to i32*)
+ %tobool1 = icmp eq ptr %a_elt, inttoptr (i64 4643 to ptr)
br i1 %tobool1, label %Top1, label %NextCond
Top1:
- %tobool2 = icmp ne i32* %a_elt, null
+ %tobool2 = icmp ne ptr %a_elt, null
br i1 %tobool2, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne i32* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
- call i32 @callee(i32* %a_elt, i32 %c, i32 %c)
+ call i32 @callee(ptr %a_elt, i32 %c, i32 %c)
br label %End
End:
; CHECK-LABEL: i32 @test_multiple_phis(
; CHECK: Header.split:
-; CHECK-NEXT: %r2 = call i32 @callee(i32* null, i32 1, i32 5)
+; CHECK-NEXT: %r2 = call i32 @callee(ptr null, i32 1, i32 5)
; CHECK-NEXT: br label %Tail
; CHECK: TBB.split:
-; CHECK-NEXT: %r1 = call i32 @callee(i32* null, i32 2, i32 10)
+; CHECK-NEXT: %r1 = call i32 @callee(ptr null, i32 2, i32 10)
; CHECK-NEXT: br label %Tail
; CHECK: Tail:
%p.0 = phi i32 [0, %Header], [99, %TBB]
%p.1 = phi i32[1, %Header], [2, %TBB]
%p.2 = phi i32 [5, %Header], [10, %TBB]
- %r = call i32 @callee(i32* null, i32 %p.1, i32 %p.2)
+ %r = call i32 @callee(ptr null, i32 %p.1, i32 %p.2)
%res = add i32 %r, %p.0
ret i32 %r
; CHECK-LABEL: CallSite:
; CHECK-NEXT: phi i32 [ [[R2]], %land.rhs.split ], [ [[R1]], %entry.split ], !dbg [[DBG1]]
-define i32 @test1(i32* dereferenceable(4) %cc, i32 %dd) !dbg !6 {
+define i32 @test1(ptr dereferenceable(4) %cc, i32 %dd) !dbg !6 {
entry:
br i1 undef, label %CallSite, label %land.rhs
}
; CHECK-LABEL: @test2
-; CHECK: [[LV1:%.*]] = load i32, i32* %ptr, align 4, !dbg [[DBG_LV:!.*]]
+; CHECK: [[LV1:%.*]] = load i32, ptr %ptr, align 4, !dbg [[DBG_LV:!.*]]
; CHECK-NEXT: [[R1:%.+]] = call i32 @callee(i32 0, i32 10), !dbg [[DBG_CALL:!.*]]
-; CHECK: [[LV2:%.*]] = load i32, i32* %ptr, align 4, !dbg [[DBG_LV]]
+; CHECK: [[LV2:%.*]] = load i32, ptr %ptr, align 4, !dbg [[DBG_LV]]
; CHECK-NEXT: [[R2:%.+]] = call i32 @callee(i32 0, i32 %i), !dbg [[DBG_CALL]]
; CHECK-LABEL: CallSite:
; CHECK-NEXT: phi i32 [ [[LV1]], %Header.split ], [ [[LV2]], %TBB.split ], !dbg [[DBG_LV]]
; CHECK-NEXT: phi i32 [ [[R1]], %Header.split ], [ [[R2]], %TBB.split ], !dbg [[DBG_CALL]]
-define void @test2(i32* %ptr, i32 %i) !dbg !19 {
+define void @test2(ptr %ptr, i32 %i) !dbg !19 {
Header:
%tobool = icmp ne i32 %i, 10
br i1 %tobool, label %TBB, label %CallSite
br i1 undef, label %CallSite, label %End
CallSite: ; preds = %TBB, %Header
- %lv = load i32, i32* %ptr, align 4, !dbg !25
+ %lv = load i32, ptr %ptr, align 4, !dbg !25
%cv = call i32 @callee(i32 0, i32 %i), !dbg !26
%sub = sub nsw i32 %lv, %cv
br label %End
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-linaro-linux-gnueabi"
-%struct.bitmap = type { i32, %struct.bitmap* }
+%struct.bitmap = type { i32, ptr }
;CHECK-LABEL: @caller
;CHECK-LABEL: Top.split:
-;CHECK: call void @callee(%struct.bitmap* null, %struct.bitmap* null, %struct.bitmap* %b_elt, i1 false)
+;CHECK: call void @callee(ptr null, ptr null, ptr %b_elt, i1 false)
;CHECK-LABEL: NextCond:
;CHECK: br {{.*}} label %callee.exit
;CHECK-LABEL: callee.exit:
-;CHECK: call void @dummy2(%struct.bitmap* %a_elt)
+;CHECK: call void @dummy2(ptr %a_elt)
-define void @caller(i1 %c, %struct.bitmap* %a_elt, %struct.bitmap* %b_elt) {
+define void @caller(i1 %c, ptr %a_elt, ptr %b_elt) {
entry:
br label %Top
Top:
- %tobool1 = icmp eq %struct.bitmap* %a_elt, null
+ %tobool1 = icmp eq ptr %a_elt, null
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne %struct.bitmap* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
%p = phi i1 [0, %Top], [%c, %NextCond]
- call void @callee(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %b_elt, i1 %p)
+ call void @callee(ptr %a_elt, ptr %a_elt, ptr %b_elt, i1 %p)
br label %End
End:
ret void
}
-define void @callee(%struct.bitmap* %dst_elt, %struct.bitmap* %a_elt, %struct.bitmap* %b_elt, i1 %c) {
+define void @callee(ptr %dst_elt, ptr %a_elt, ptr %b_elt, i1 %c) {
entry:
- %tobool = icmp ne %struct.bitmap* %a_elt, null
- %tobool1 = icmp ne %struct.bitmap* %b_elt, null
+ %tobool = icmp ne ptr %a_elt, null
+ %tobool1 = icmp ne ptr %b_elt, null
%or.cond = and i1 %tobool, %tobool1
br i1 %or.cond, label %Cond, label %Big
Cond:
- %cmp = icmp eq %struct.bitmap* %dst_elt, %a_elt
+ %cmp = icmp eq ptr %dst_elt, %a_elt
br i1 %cmp, label %Small, label %Big
Small:
- call void @dummy2(%struct.bitmap* %a_elt)
+ call void @dummy2(ptr %a_elt)
br label %End
Big:
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
- call void @dummy1(%struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt, %struct.bitmap* %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
+ call void @dummy1(ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt, ptr %a_elt)
br label %End
End:
ret void
}
-declare void @dummy2(%struct.bitmap*)
-declare void @dummy1(%struct.bitmap*, %struct.bitmap*, %struct.bitmap*, %struct.bitmap*, %struct.bitmap*, %struct.bitmap*)
+declare void @dummy2(ptr)
+declare void @dummy1(ptr, ptr, ptr, ptr, ptr, ptr)
;CHECK-LABEL: @caller2
;CHECK: call void @dummy3()
;CheCK-LABEL: CallSiteBB:
;CHECK: call void @foo(i1 %tobool1)
-define void @caller2(i1 %c, %struct.bitmap* %a_elt, %struct.bitmap* %b_elt, %struct.bitmap* %c_elt) {
+define void @caller2(i1 %c, ptr %a_elt, ptr %b_elt, ptr %c_elt) {
entry:
br label %Top
Top:
- %tobool1 = icmp eq %struct.bitmap* %a_elt, %b_elt
+ %tobool1 = icmp eq ptr %a_elt, %b_elt
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne %struct.bitmap* %b_elt, %c_elt
+ %cmp = icmp ne ptr %b_elt, %c_elt
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
; CHECK-LABEL: define void @convergent_caller(
; CHECK: call void @convergent_callee(
; CHECK-NOT: call void @convergent_callee(
-define void @convergent_caller(i1 %c, i8* %a_elt, i8* %b_elt) #0 {
+define void @convergent_caller(i1 %c, ptr %a_elt, ptr %b_elt) #0 {
entry:
br label %Top
Top:
- %tobool1 = icmp eq i8* %a_elt, null
+ %tobool1 = icmp eq ptr %a_elt, null
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne i8* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
%p = phi i1 [ false, %Top ], [ %c, %NextCond ]
- call void @convergent_callee(i8* %a_elt, i1 %p)
+ call void @convergent_callee(ptr %a_elt, i1 %p)
br label %End
End:
; CHECK-LABEL: define void @convergent_callee(
; CHECK: call void @convergent_external(
; CHECK-NOT: call void @convergent_external(
-define void @convergent_callee(i8* %a_elt, i1 %c) #0 {
+define void @convergent_callee(ptr %a_elt, i1 %c) #0 {
entry:
- %tobool = icmp ne i8* %a_elt, null
+ %tobool = icmp ne ptr %a_elt, null
br i1 %tobool, label %then, label %endif
then:
br label %endif
endif:
- call void @convergent_external(i8* %a_elt) #0
+ call void @convergent_external(ptr %a_elt) #0
ret void
}
; CHECK-LABEL: define void @reference_caller(
; CHECK: call void @nonconvergent_callee(
; CHECK: call void @nonconvergent_callee(
-define void @reference_caller(i1 %c, i8* %a_elt, i8* %b_elt) #1 {
+define void @reference_caller(i1 %c, ptr %a_elt, ptr %b_elt) #1 {
entry:
br label %Top
Top:
- %tobool1 = icmp eq i8* %a_elt, null
+ %tobool1 = icmp eq ptr %a_elt, null
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne i8* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
%p = phi i1 [ false, %Top ], [ %c, %NextCond ]
- call void @nonconvergent_callee(i8* %a_elt, i1 %p)
+ call void @nonconvergent_callee(ptr %a_elt, i1 %p)
br label %End
End:
; CHECK-LABEL: define void @nonconvergent_callee(
; CHECK: call void @nonconvergent_external(
; CHECK-NOT: call void @nonconvergent_external(
-define void @nonconvergent_callee(i8* %a_elt, i1 %c) #1 {
+define void @nonconvergent_callee(ptr %a_elt, i1 %c) #1 {
entry:
- %tobool = icmp ne i8* %a_elt, null
+ %tobool = icmp ne ptr %a_elt, null
br i1 %tobool, label %then, label %endif
then:
br label %endif
endif:
- call void @nonconvergent_external(i8* %a_elt)
+ call void @nonconvergent_external(ptr %a_elt)
ret void
}
-declare void @convergent_external(i8*) #0
-declare void @nonconvergent_external(i8*) #1
+declare void @convergent_external(ptr) #0
+declare void @nonconvergent_external(ptr) #1
attributes #0 = { convergent nounwind }
attributes #1 = { nounwind }
; CHECK: call void @callee
; CHECK-NOT: call void @callee
-declare void @foo(i1* %p);
-declare void @bar(i1* %p);
+declare void @foo(ptr %p);
+declare void @bar(ptr %p);
declare dso_local i32 @__gxx_personality_v0(...)
-define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
entry:
- %0 = icmp eq i1* %p, null
+ %0 = icmp eq ptr %p, null
br i1 %0, label %bb1, label %bb2
bb1:
- invoke void @foo(i1* %p) to label %end1 unwind label %lpad
+ invoke void @foo(ptr %p) to label %end1 unwind label %lpad
bb2:
- invoke void @bar(i1* %p) to label %end2 unwind label %lpad
+ invoke void @bar(ptr %p) to label %end2 unwind label %lpad
lpad:
- %1 = landingpad { i8*, i32 } cleanup
- call void @callee(i1* %p)
- resume { i8*, i32 } %1
+ %1 = landingpad { ptr, i32 } cleanup
+ call void @callee(ptr %p)
+ resume { ptr, i32 } %1
end1:
ret void
ret void
}
-define internal void @callee(i1* %p) {
+define internal void @callee(ptr %p) {
ret void
}
; CHECK-LABEL: define void @noduplicate_caller(
; CHECK: call void @noduplicate_callee(
; CHECK-NOT: call void @noduplicate_callee(
-define void @noduplicate_caller(i1 %c, i8* %a_elt, i8* %b_elt) #0 {
+define void @noduplicate_caller(i1 %c, ptr %a_elt, ptr %b_elt) #0 {
entry:
br label %Top
Top:
- %tobool1 = icmp eq i8* %a_elt, null
+ %tobool1 = icmp eq ptr %a_elt, null
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne i8* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
%p = phi i1 [ false, %Top ], [ %c, %NextCond ]
- call void @noduplicate_callee(i8* %a_elt, i1 %p)
+ call void @noduplicate_callee(ptr %a_elt, i1 %p)
br label %End
End:
; CHECK-LABEL: define void @noduplicate_callee(
; CHECK: call void @noduplicate_external(
; CHECK-NOT: call void @noduplicate_external(
-define void @noduplicate_callee(i8* %a_elt, i1 %c) #0 {
+define void @noduplicate_callee(ptr %a_elt, i1 %c) #0 {
entry:
- %tobool = icmp ne i8* %a_elt, null
+ %tobool = icmp ne ptr %a_elt, null
br i1 %tobool, label %then, label %endif
then:
br label %endif
endif:
- call void @noduplicate_external(i8* %a_elt) #0
+ call void @noduplicate_external(ptr %a_elt) #0
ret void
}
; CHECK-LABEL: define void @reference_caller(
; CHECK: call void @nonnoduplicate_callee(
; CHECK: call void @nonnoduplicate_callee(
-define void @reference_caller(i1 %c, i8* %a_elt, i8* %b_elt) #1 {
+define void @reference_caller(i1 %c, ptr %a_elt, ptr %b_elt) #1 {
entry:
br label %Top
Top:
- %tobool1 = icmp eq i8* %a_elt, null
+ %tobool1 = icmp eq ptr %a_elt, null
br i1 %tobool1, label %CallSiteBB, label %NextCond
NextCond:
- %cmp = icmp ne i8* %b_elt, null
+ %cmp = icmp ne ptr %b_elt, null
br i1 %cmp, label %CallSiteBB, label %End
CallSiteBB:
%p = phi i1 [ false, %Top ], [ %c, %NextCond ]
- call void @nonnoduplicate_callee(i8* %a_elt, i1 %p)
+ call void @nonnoduplicate_callee(ptr %a_elt, i1 %p)
br label %End
End:
; CHECK-LABEL: define void @nonnoduplicate_callee(
; CHECK: call void @nonnoduplicate_external(
; CHECK-NOT: call void @nonnoduplicate_external(
-define void @nonnoduplicate_callee(i8* %a_elt, i1 %c) #1 {
+define void @nonnoduplicate_callee(ptr %a_elt, i1 %c) #1 {
entry:
- %tobool = icmp ne i8* %a_elt, null
+ %tobool = icmp ne ptr %a_elt, null
br i1 %tobool, label %then, label %endif
then:
br label %endif
endif:
- call void @nonnoduplicate_external(i8* %a_elt)
+ call void @nonnoduplicate_external(ptr %a_elt)
ret void
}
-declare void @noduplicate_external(i8*) #0
-declare void @nonnoduplicate_external(i8*) #1
+declare void @noduplicate_external(ptr) #0
+declare void @nonnoduplicate_external(ptr) #1
attributes #0 = { noduplicate nounwind }
attributes #1 = { nounwind }
target triple = "aarch64-unknown-linux-gnueabi"
-@global_function = internal unnamed_addr global void ()* null, align 8
-@global_array = common unnamed_addr global i64* null, align 8
+@global_function = internal unnamed_addr global ptr null, align 8
+@global_array = common unnamed_addr global ptr null, align 8
; This test checks that we propagate the functions through an internal global
; variable, and attach !callees metadata to the call. Such metadata can enable
; vectorizer to vectorize the sum reduction.
;
; CHECK: call void %tmp0(), !callees ![[MD:[0-9]+]]
-; CHECK: ![[MD]] = !{void ()* @invariant_1, void ()* @invariant_2}
+; CHECK: ![[MD]] = !{ptr @invariant_1, ptr @invariant_2}
;
define i64 @test_memory_entry(i64 %n, i1 %flag) {
entry:
br i1 %flag, label %then, label %else
then:
- store void ()* @invariant_1, void ()** @global_function
+ store ptr @invariant_1, ptr @global_function
br label %merge
else:
- store void ()* @invariant_2, void ()** @global_function
+ store ptr @invariant_2, ptr @global_function
br label %merge
merge:
define internal i64 @test_memory(i64 %n) {
entry:
- %array = load i64*, i64** @global_array
+ %array = load ptr, ptr @global_array
br label %for.body
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%r = phi i64 [ 0, %entry ], [ %tmp3, %for.body ]
- %tmp0 = load void ()*, void ()** @global_function
+ %tmp0 = load ptr, ptr @global_function
call void %tmp0()
- %tmp1 = getelementptr inbounds i64, i64* %array, i64 %i
- %tmp2 = load i64, i64* %tmp1
+ %tmp1 = getelementptr inbounds i64, ptr %array, i64 %i
+ %tmp2 = load i64, ptr %tmp1
%tmp3 = add i64 %tmp2, %r
%i.next = add nuw nsw i64 %i, 1
%cond = icmp slt i64 %i.next, %n
target triple = "aarch64-unknown-linux-gnueabi"
-@global_function = internal unnamed_addr global void ()* null, align 8
+@global_function = internal unnamed_addr global ptr null, align 8
@global_scalar = internal unnamed_addr global i64 zeroinitializer
; This test checks that we propagate the functions through a select
; always return the constant "1", eliminating the load and store instructions.
;
; CHECK: call void %tmp0(), !callees ![[MD:[0-9]+]]
-; CHECK: ![[MD]] = !{void ()* @norecurse_1, void ()* @norecurse_2}
+; CHECK: ![[MD]] = !{ptr @norecurse_1, ptr @norecurse_2}
;
define i64 @test_select_entry(i1 %flag) {
entry:
define internal i64 @test_select(i1 %flag) {
entry:
- %tmp0 = select i1 %flag, void ()* @norecurse_1, void ()* @norecurse_2
- store i64 1, i64* @global_scalar
+ %tmp0 = select i1 %flag, ptr @norecurse_1, ptr @norecurse_2
+ store i64 1, ptr @global_scalar
call void %tmp0()
- %tmp1 = load i64, i64* @global_scalar
+ %tmp1 = load i64, ptr @global_scalar
ret i64 %tmp1
}
; assembly
; REQUIRES: aarch64-registered-target
-define void @f(i8* %p, i32 %n, i32 %m) {
+define void @f(ptr %p, i32 %n, i32 %m) {
; CHECK-LABEL: f:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add w8, w2, #1
%i.next = add i32 %i, 1
%i.next.fr = freeze i32 %i.next
%j = add i32 %m, %i.next.fr
- %q = getelementptr i8, i8* %p, i32 %j
- store i8 0, i8* %q
+ %q = getelementptr i8, ptr %p, i32 %j
+ store i8 0, ptr %q
%cond = icmp eq i32 %i.next.fr, %n
br i1 %cond, label %exit, label %loop
exit:
ret void
}
-define void @f_without_freeze(i8* %p, i32 %n, i32 %m) {
+define void @f_without_freeze(ptr %p, i32 %n, i32 %m) {
; CHECK-LABEL: f_without_freeze:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add w8, w2, #1
%i = phi i32 [0, %entry], [%i.next, %loop]
%i.next = add i32 %i, 1
%j = add i32 %m, %i.next
- %q = getelementptr i8, i8* %p, i32 %j
- store i8 0, i8* %q
+ %q = getelementptr i8, ptr %p, i32 %j
+ store i8 0, ptr %q
%cond = icmp eq i32 %i.next, %n
br i1 %cond, label %exit, label %loop
exit:
%struct.g = type { i64, %struct.arc, i64, i64, i64 }
@m = global i64 0
-@h = global %struct.arc* null
+@h = global ptr null
@j = global %struct.g zeroinitializer
;; Freeze-hoisting test: %tmp12 (a loop-carried value) is frozen inside the
;; loop; the checks expect the freeze to be applied once to the loop input
;; (%tmp3.frozen in the preheader bb2) and the in-loop freeze to disappear,
;; with the nsw flag on %tmp12 dropped accordingly.
;; NOTE(review): this excerpt appears truncated - bb22 has no visible body,
;; and %tmp27, %bb34, %bb35 are referenced but never defined here; confirm
;; against the full test file.
define dso_local i32 @main() {
bb:
- %tmp = load i64, i64* getelementptr inbounds (%struct.g, %struct.g* @j, i32 0, i32 0), align 8
+ %tmp = load i64, ptr @j, align 8
%tmp1 = icmp sgt i64 %tmp, 0
br i1 %tmp1, label %bb2, label %bb35
bb2: ; preds = %bb
- %tmp3 = load i64, i64* @m, align 8
- %tmp4 = load %struct.arc*, %struct.arc** @h, align 8
+ %tmp3 = load i64, ptr @m, align 8
+ %tmp4 = load ptr, ptr @h, align 8
; CHECK: %tmp3.frozen = freeze i64 %tmp3
br label %bb5
bb5: ; preds = %bb28, %bb2
- %tmp6 = phi %struct.arc* [ %tmp4, %bb2 ], [ %tmp31, %bb28 ]
+ %tmp6 = phi ptr [ %tmp4, %bb2 ], [ %tmp31, %bb28 ]
%tmp7 = phi i64 [ %tmp3, %bb2 ], [ %tmp12, %bb28 ]
; CHECK: %tmp7 = phi i64 [ %tmp3.frozen, %bb2 ], [ %tmp12, %bb28 ]
%tmp8 = phi i64 [ 0, %bb2 ], [ %tmp11, %bb28 ]
%tmp9 = trunc i64 %tmp7 to i32
- %tmp10 = getelementptr inbounds %struct.arc, %struct.arc* %tmp6, i64 0, i32 0
- store i32 %tmp9, i32* %tmp10, align 4
+ store i32 %tmp9, ptr %tmp6, align 4
%tmp11 = add nuw nsw i64 %tmp8, 1
%tmp12 = add nsw i64 %tmp7, 1
; CHECK: %tmp12 = add i64 %tmp7, 1
- store i64 %tmp12, i64* @m, align 8
- %tmp13 = load i64, i64* inttoptr (i64 16 to i64*), align 16
+ store i64 %tmp12, ptr @m, align 8
+ %tmp13 = load i64, ptr inttoptr (i64 16 to ptr), align 16
%tmp14 = freeze i64 %tmp12
; CHECK-NOT: %tmp14 = freeze i64 %tmp12
%tmp15 = freeze i64 %tmp13
;; Manual remainder: %tmp18 = %tmp14 - (%tmp14 sdiv %tmp15) * %tmp15.
%tmp16 = sdiv i64 %tmp14, %tmp15
%tmp17 = mul i64 %tmp16, %tmp15
%tmp18 = sub i64 %tmp14, %tmp17
- %tmp19 = load i64, i64* inttoptr (i64 24 to i64*), align 8
+ %tmp19 = load i64, ptr inttoptr (i64 24 to ptr), align 8
%tmp20 = icmp sgt i64 %tmp18, %tmp19
- %tmp21 = load i64, i64* inttoptr (i64 32 to i64*), align 32
+ %tmp21 = load i64, ptr inttoptr (i64 32 to ptr), align 32
br i1 %tmp20, label %bb22, label %bb28
bb22: ; preds = %bb5
bb28: ; preds = %bb22, %bb5
%tmp29 = phi i64 [ %tmp27, %bb22 ], [ %tmp21, %bb5 ]
%tmp30 = add nsw i64 %tmp29, %tmp16
- %tmp31 = getelementptr inbounds %struct.arc, %struct.arc* getelementptr inbounds (%struct.g, %struct.g* @j, i32 0, i32 1), i64 %tmp30
- store %struct.arc* %tmp31, %struct.arc** @h, align 8
- %tmp32 = load i64, i64* getelementptr inbounds (%struct.g, %struct.g* @j, i32 0, i32 0), align 8
+ %tmp31 = getelementptr inbounds %struct.arc, ptr getelementptr inbounds (%struct.g, ptr @j, i32 0, i32 1), i64 %tmp30
+ store ptr %tmp31, ptr @h, align 8
+ %tmp32 = load i64, ptr @j, align 8
%tmp33 = icmp slt i64 %tmp11, %tmp32
br i1 %tmp33, label %bb5, label %bb34
}
; Negative test
-define void @gep(i8* %init, i8* %end) {
+define void @gep(ptr %init, ptr %end) {
; CHECK-LABEL: @gep(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i8* [ [[INIT:%.*]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[I_NEXT]] = getelementptr inbounds i8, i8* [[I]], i64 1
-; CHECK-NEXT: [[I_NEXT_FR:%.*]] = freeze i8* [[I_NEXT]]
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i8* [[I_NEXT_FR]], [[END:%.*]]
+; CHECK-NEXT: [[I:%.*]] = phi ptr [ [[INIT:%.*]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I_NEXT]] = getelementptr inbounds i8, ptr [[I]], i64 1
+; CHECK-NEXT: [[I_NEXT_FR:%.*]] = freeze ptr [[I_NEXT]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[I_NEXT_FR]], [[END:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
br label %loop
loop:
- %i = phi i8* [ %init, %entry], [%i.next, %loop ]
- %i.next = getelementptr inbounds i8, i8* %i, i64 1
- %i.next.fr = freeze i8* %i.next
- %cond = icmp eq i8* %i.next.fr, %end
+ %i = phi ptr [ %init, %entry], [%i.next, %loop ]
+ %i.next = getelementptr inbounds i8, ptr %i, i64 1
+ %i.next.fr = freeze ptr %i.next
+ %cond = icmp eq ptr %i.next.fr, %end
br i1 %cond, label %loop, label %exit
exit:
;; Constant-hoisting test: three field accesses share the absolute address
;; 68141056. The checks expect the constant to be materialized once
;; (%const), converted to a pointer once, and reused by all three GEPs.
define i32 @test1() nounwind {
; CHECK-LABEL: test1
; CHECK: %const = bitcast i64 68141056 to i64
-; CHECK: %1 = inttoptr i64 %const to %T*
-; CHECK: %o1 = getelementptr %T, %T* %1, i32 0, i32 1
-; CHECK: %o2 = getelementptr %T, %T* %1, i32 0, i32 2
-; CHECK: %o3 = getelementptr %T, %T* %1, i32 0, i32 3
- %at = inttoptr i64 68141056 to %T*
- %o1 = getelementptr %T, %T* %at, i32 0, i32 1
- %t1 = load i32, i32* %o1
- %o2 = getelementptr %T, %T* %at, i32 0, i32 2
- %t2 = load i32, i32* %o2
+; CHECK: %1 = inttoptr i64 %const to ptr
+; CHECK: %o1 = getelementptr %T, ptr %1, i32 0, i32 1
+; CHECK: %o2 = getelementptr %T, ptr %1, i32 0, i32 2
+; CHECK: %o3 = getelementptr %T, ptr %1, i32 0, i32 3
+ %at = inttoptr i64 68141056 to ptr
+ %o1 = getelementptr %T, ptr %at, i32 0, i32 1
+ %t1 = load i32, ptr %o1
+ %o2 = getelementptr %T, ptr %at, i32 0, i32 2
+ %t2 = load i32, ptr %o2
%a1 = add i32 %t1, %t2
- %o3 = getelementptr %T, %T* %at, i32 0, i32 3
- %t3 = load i32, i32* %o3
+ %o3 = getelementptr %T, ptr %at, i32 0, i32 3
+ %t3 = load i32, ptr %o3
;; Sum of the three loaded fields.
%a2 = add i32 %a1, %t3
ret i32 %a2
}
; Make sure we hoist constants out of intrinsics.
;; The four large immediates passed to the stxr intrinsic differ by 64 each;
;; the checks expect one hoisted base constant plus per-call adds of
;; -64 / +64 / +128 (the second call uses the base directly).
-define void @test_stxr(i64* %ptr) {
+define void @test_stxr(ptr %ptr) {
; CHECK-LABEL: @test_stxr(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONST:%.*]] = bitcast i64 -9223372036317904832 to i64
-; CHECK-NEXT: [[PTR_0:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 0
; CHECK-NEXT: [[CONST_MAT:%.*]] = add i64 [[CONST]], -64
-; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT]], i64* elementtype(i64) [[PTR_0]])
-; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr i64, i64* [[PTR]], i64 1
-; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST]], i64* elementtype(i64) [[PTR_1]])
-; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr i64, i64* [[PTR]], i64 2
+; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[CONST_MAT]], ptr elementtype(i64) [[PTR:%.*]])
+; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr i64, ptr [[PTR]], i64 1
+; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[CONST]], ptr elementtype(i64) [[PTR_1]])
+; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr i64, ptr [[PTR]], i64 2
; CHECK-NEXT: [[CONST_MAT1:%.*]] = add i64 [[CONST]], 64
-; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT1]], i64* elementtype(i64) [[PTR_2]])
-; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr i64, i64* [[PTR]], i64 3
+; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[CONST_MAT1]], ptr elementtype(i64) [[PTR_2]])
+; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr i64, ptr [[PTR]], i64 3
; CHECK-NEXT: [[CONST_MAT2:%.*]] = add i64 [[CONST]], 128
-; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT2]], i64* elementtype(i64) [[PTR_3]])
+; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[CONST_MAT2]], ptr elementtype(i64) [[PTR_3]])
; CHECK-NEXT: ret void
;
entry:
- %ptr.0 = getelementptr i64, i64* %ptr, i64 0
- %bar.0 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904896, i64* elementtype(i64) %ptr.0)
- %ptr.1 = getelementptr i64, i64* %ptr, i64 1
- %bar.1 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904832, i64* elementtype(i64) %ptr.1)
- %ptr.2 = getelementptr i64, i64* %ptr, i64 2
- %bar.2 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904768, i64* elementtype(i64) %ptr.2)
- %ptr.3 = getelementptr i64, i64* %ptr, i64 3
- %bar.3 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904704, i64* elementtype(i64) %ptr.3)
+ %bar.0 = call i32 @llvm.aarch64.stxr.p0(i64 -9223372036317904896, ptr elementtype(i64) %ptr)
+ %ptr.1 = getelementptr i64, ptr %ptr, i64 1
+ %bar.1 = call i32 @llvm.aarch64.stxr.p0(i64 -9223372036317904832, ptr elementtype(i64) %ptr.1)
+ %ptr.2 = getelementptr i64, ptr %ptr, i64 2
+ %bar.2 = call i32 @llvm.aarch64.stxr.p0(i64 -9223372036317904768, ptr elementtype(i64) %ptr.2)
+ %ptr.3 = getelementptr i64, ptr %ptr, i64 3
+ %bar.3 = call i32 @llvm.aarch64.stxr.p0(i64 -9223372036317904704, ptr elementtype(i64) %ptr.3)
ret void
}
-declare i32 @llvm.aarch64.stxr.p0i64(i64 , i64*)
+declare i32 @llvm.aarch64.stxr.p0(i64 , ptr)
define i64 @test_udiv(i64 %x) {
; CHECK-LABEL: @test_udiv(
declare i64 @llvm.aarch64.udiv.i64.i64(i64, i64)
;; The large i64 size operands of the lifetime/invariant intrinsic calls must
;; NOT be hoisted: the checks expect each call to keep its literal constant
;; (100000000032 / 64 / 128 / 256) rather than a shared materialized base.
-define void @test_free_intrinsics(i64 %x, i8* %ptr) {
+define void @test_free_intrinsics(i64 %x, ptr %ptr) {
; CHECK-LABEL: @test_free_intrinsics(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 100000000032, i8* [[PTR:%.*]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 100000000064, i8* [[PTR]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 100000000128, i8* [[PTR]])
-; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 100000000256, i8* [[PTR]])
-; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[I]], i64 100000000256, i8* [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000032, ptr [[PTR:%.*]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000064, ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100000000128, ptr [[PTR]])
+; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 100000000256, ptr [[PTR]])
; CHECK-NEXT: ret void
;
entry:
- call void @llvm.lifetime.start.p0i8(i64 100000000032, i8* %ptr)
- call void @llvm.lifetime.start.p0i8(i64 100000000064, i8* %ptr)
- call void @llvm.lifetime.end.p0i8(i64 100000000128, i8* %ptr)
- %i = call {}* @llvm.invariant.start.p0i8(i64 100000000256, i8* %ptr)
- call void @llvm.invariant.end.p0i8({}* %i, i64 100000000256, i8* %ptr)
+ call void @llvm.lifetime.start.p0(i64 100000000032, ptr %ptr)
+ call void @llvm.lifetime.start.p0(i64 100000000064, ptr %ptr)
+ call void @llvm.lifetime.end.p0(i64 100000000128, ptr %ptr)
+ %i = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr %ptr)
+ call void @llvm.invariant.end.p0(ptr %i, i64 100000000256, ptr %ptr)
ret void
}
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
-declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture)
-declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture)
+declare ptr @llvm.invariant.start.p0(i64, ptr nocapture)
+declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture)
; CHECK-NEXT: br label [[LBL1:%.*]]
; CHECK: lbl1.us:
; CHECK-NEXT: [[CONST1:%.*]] = bitcast i32 1232131 to i32
-; CHECK-NEXT: store i32 [[CONST1]], i32* @c.a, align 1
+; CHECK-NEXT: store i32 [[CONST1]], ptr @c.a, align 1
; CHECK-NEXT: br label [[FOR_COND4:%.*]]
; CHECK: lbl1:
-; CHECK-NEXT: store i32 [[CONST]], i32* @c.a, align 1
+; CHECK-NEXT: store i32 [[CONST]], ptr @c.a, align 1
; CHECK-NEXT: br i1 undef, label [[IF_THEN:%.*]], label [[FOR_END12:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br i1 undef, label [[LBL1]], label [[FOR_COND4]]
; CHECK: for.cond4:
; CHECK-NEXT: br label [[FOR_COND4]]
; CHECK: for.body9:
-; CHECK-NEXT: store i32 1232131, i32* undef, align 1
-; CHECK-NEXT: store i32 1232132, i32* undef, align 1
+; CHECK-NEXT: store i32 1232131, ptr undef, align 1
+; CHECK-NEXT: store i32 1232132, ptr undef, align 1
; CHECK-NEXT: br label [[FOR_BODY9:%.*]]
; CHECK: for.end12:
; CHECK-NEXT: ret void
br label %lbl1
lbl1.us: ; preds = %entry
- store i32 1232131, i32* @c.a, align 1
+ store i32 1232131, ptr @c.a, align 1
br label %for.cond4
lbl1: ; preds = %if.then, %entry.entry.split_crit_edge
- store i32 1232131, i32* @c.a, align 1
+ store i32 1232131, ptr @c.a, align 1
br i1 undef, label %if.then, label %for.end12
if.then: ; preds = %lbl1
br label %for.cond4
for.body9: ; preds = %for.body9
- store i32 1232131, i32* undef, align 1
- store i32 1232132, i32* undef, align 1
+ store i32 1232131, ptr undef, align 1
+ store i32 1232132, ptr undef, align 1
br label %for.body9
for.end12: ; preds = %lbl1
; We don't want to convert constant divides because the benefit from converting
; them to a mul in the backend is larger than constant materialization savings.
-define void @signed_const_division(i32 %in1, i32 %in2, i32* %addr) {
+define void @signed_const_division(i32 %in1, i32 %in2, ptr %addr) {
; CHECK-LABEL: @signed_const_division
; CHECK: %res1 = sdiv i32 %l1, 1000000000
; CHECK: %res2 = srem i32 %l2, 1000000000
%l1 = phi i32 [%res1, %loop], [%in1, %entry]
%l2 = phi i32 [%res2, %loop], [%in2, %entry]
%res1 = sdiv i32 %l1, 1000000000
- store volatile i32 %res1, i32* %addr
+ store volatile i32 %res1, ptr %addr
%res2 = srem i32 %l2, 1000000000
- store volatile i32 %res2, i32* %addr
+ store volatile i32 %res2, ptr %addr
%again = icmp eq i32 %res1, %res2
br i1 %again, label %loop, label %end
ret void
}
-define void @unsigned_const_division(i32 %in1, i32 %in2, i32* %addr) {
+define void @unsigned_const_division(i32 %in1, i32 %in2, ptr %addr) {
; CHECK-LABEL: @unsigned_const_division
; CHECK: %res1 = udiv i32 %l1, 1000000000
; CHECK: %res2 = urem i32 %l2, 1000000000
%l1 = phi i32 [%res1, %loop], [%in1, %entry]
%l2 = phi i32 [%res2, %loop], [%in2, %entry]
%res1 = udiv i32 %l1, 1000000000
- store volatile i32 %res1, i32* %addr
+ store volatile i32 %res1, ptr %addr
%res2 = urem i32 %l2, 1000000000
- store volatile i32 %res2, i32* %addr
+ store volatile i32 %res2, ptr %addr
%again = icmp eq i32 %res1, %res2
br i1 %again, label %loop, label %end
@exception_type = external global i8
; Constants in inline ASM should not be hoisted.
;; Immediates used by inline-asm invokes must stay inline: the checks require
;; that no %const is materialized for 214672 and that the invoke keeps the
;; literal operand.
;; NOTE(review): the entry block containing the invoke is not visible in this
;; excerpt - only the landing pad is shown; confirm against the full file.
-define i32 @inline_asm_invoke() personality i8* null {
+define i32 @inline_asm_invoke() personality ptr null {
;CHECK-LABEL: @inline_asm_invoke
;CHECK-NOT: %const = 214672
;CHECK: %X = invoke i32 asm "bswap $0", "=r,r"(i32 214672)
lpad:
%lp = landingpad i32
cleanup
- catch i8* @exception_type
+ catch ptr @exception_type
ret i32 1
}
; CHECK-LABEL: @foo
; CHECK-NOT: [[CONST1:%const_mat[0-9]*]] = add i32 %const, -4
; CHECK-LABEL: @foo_pgso
- %0 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ %0 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%or = or i32 %0, 1
- store volatile i32 %or, i32* inttoptr (i32 1073876992 to i32*), align 4096
- %1 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4
+ store volatile i32 %or, ptr inttoptr (i32 1073876992 to ptr), align 4096
+ %1 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4
%and = and i32 %1, -117506048
- store volatile i32 %and, i32* inttoptr (i32 1073876996 to i32*), align 4
- %2 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ store volatile i32 %and, ptr inttoptr (i32 1073876996 to ptr), align 4
+ %2 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%and1 = and i32 %2, -17367041
- store volatile i32 %and1, i32* inttoptr (i32 1073876996 to i32*), align 4096
- %3 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ store volatile i32 %and1, ptr inttoptr (i32 1073876996 to ptr), align 4096
+ %3 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%and2 = and i32 %3, -262145
- store volatile i32 %and2, i32* inttoptr (i32 1073876992 to i32*), align 4096
- %4 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4
+ store volatile i32 %and2, ptr inttoptr (i32 1073876992 to ptr), align 4096
+ %4 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4
%and3 = and i32 %4, -8323073
- store volatile i32 %and3, i32* inttoptr (i32 1073876996 to i32*), align 4
- store volatile i32 10420224, i32* inttoptr (i32 1073877000 to i32*), align 8
- %5 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4096
+ store volatile i32 %and3, ptr inttoptr (i32 1073876996 to ptr), align 4
+ store volatile i32 10420224, ptr inttoptr (i32 1073877000 to ptr), align 8
+ %5 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4096
%or4 = or i32 %5, 65536
- store volatile i32 %or4, i32* inttoptr (i32 1073876996 to i32*), align 4096
- %6 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or4, ptr inttoptr (i32 1073876996 to ptr), align 4096
+ %6 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%or6.i.i = or i32 %6, 16
- store volatile i32 %or6.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
- %7 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or6.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
+ %7 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%and7.i.i = and i32 %7, -4
- store volatile i32 %and7.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
- %8 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %and7.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
+ %8 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%or8.i.i = or i32 %8, 2
- store volatile i32 %or8.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or8.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
ret void
}
; PGSO-NOT: [[CONST2:%const_mat[0-9]*]] = add i32 %const, -4
; NPGSO-LABEL: @foo_pgso
; NPGSO: [[CONST2:%const_mat[0-9]*]] = add i32 %const, -4
- %0 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ %0 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%or = or i32 %0, 1
- store volatile i32 %or, i32* inttoptr (i32 1073876992 to i32*), align 4096
- %1 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4
+ store volatile i32 %or, ptr inttoptr (i32 1073876992 to ptr), align 4096
+ %1 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4
%and = and i32 %1, -117506048
- store volatile i32 %and, i32* inttoptr (i32 1073876996 to i32*), align 4
- %2 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ store volatile i32 %and, ptr inttoptr (i32 1073876996 to ptr), align 4
+ %2 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%and1 = and i32 %2, -17367041
- store volatile i32 %and1, i32* inttoptr (i32 1073876996 to i32*), align 4096
- %3 = load volatile i32, i32* inttoptr (i32 1073876992 to i32*), align 4096
+ store volatile i32 %and1, ptr inttoptr (i32 1073876996 to ptr), align 4096
+ %3 = load volatile i32, ptr inttoptr (i32 1073876992 to ptr), align 4096
%and2 = and i32 %3, -262145
- store volatile i32 %and2, i32* inttoptr (i32 1073876992 to i32*), align 4096
- %4 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4
+ store volatile i32 %and2, ptr inttoptr (i32 1073876992 to ptr), align 4096
+ %4 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4
%and3 = and i32 %4, -8323073
- store volatile i32 %and3, i32* inttoptr (i32 1073876996 to i32*), align 4
- store volatile i32 10420224, i32* inttoptr (i32 1073877000 to i32*), align 8
- %5 = load volatile i32, i32* inttoptr (i32 1073876996 to i32*), align 4096
+ store volatile i32 %and3, ptr inttoptr (i32 1073876996 to ptr), align 4
+ store volatile i32 10420224, ptr inttoptr (i32 1073877000 to ptr), align 8
+ %5 = load volatile i32, ptr inttoptr (i32 1073876996 to ptr), align 4096
%or4 = or i32 %5, 65536
- store volatile i32 %or4, i32* inttoptr (i32 1073876996 to i32*), align 4096
- %6 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or4, ptr inttoptr (i32 1073876996 to ptr), align 4096
+ %6 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%or6.i.i = or i32 %6, 16
- store volatile i32 %or6.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
- %7 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or6.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
+ %7 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%and7.i.i = and i32 %7, -4
- store volatile i32 %and7.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
- %8 = load volatile i32, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %and7.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
+ %8 = load volatile i32, ptr inttoptr (i32 1073881088 to ptr), align 8192
%or8.i.i = or i32 %8, 2
- store volatile i32 %or8.i.i, i32* inttoptr (i32 1073881088 to i32*), align 8192
+ store volatile i32 %or8.i.i, ptr inttoptr (i32 1073881088 to ptr), align 8192
ret void
}
i32, i32, i32, i32, i32, i32 }
; Indices for GEPs should not be hoisted.
;; The repeated GEP index 256 must not be hoisted into a materialized
;; constant: the checks require both GEPs to keep their literal indices.
-define i32 @test1(%T* %P) nounwind {
+define i32 @test1(ptr %P) nounwind {
; CHECK-LABEL: @test1
-; CHECK: %addr1 = getelementptr %T, %T* %P, i32 256, i32 256
-; CHECK: %addr2 = getelementptr %T, %T* %P, i32 256, i32 256
- %addr1 = getelementptr %T, %T* %P, i32 256, i32 256
- %tmp1 = load i32, i32* %addr1
- %addr2 = getelementptr %T, %T* %P, i32 256, i32 256
- %tmp2 = load i32, i32* %addr2
+; CHECK: %addr1 = getelementptr %T, ptr %P, i32 256, i32 256
+; CHECK: %addr2 = getelementptr %T, ptr %P, i32 256, i32 256
+ %addr1 = getelementptr %T, ptr %P, i32 256, i32 256
+ %tmp1 = load i32, ptr %addr1
+ %addr2 = getelementptr %T, ptr %P, i32 256, i32 256
+ %tmp2 = load i32, ptr %addr2
%tmp4 = add i32 %tmp1, %tmp2
ret i32 %tmp4
}
; CHECK-DAG: %[[C2:const[0-9]?]] = bitcast i32 805874688 to i32
; CHECK-DAG: %[[C3:const[0-9]?]] = bitcast i32 805873720 to i32
; CHECK-DAG: %[[C4:const[0-9]?]] = bitcast i32 805873688 to i32
-; CHECK: %0 = inttoptr i32 %[[C2]] to i8*
-; CHECK-NEXT: %1 = load volatile i8, i8* %0
+; CHECK: %0 = inttoptr i32 %[[C2]] to ptr
+; CHECK-NEXT: %1 = load volatile i8, ptr %0
; CHECK-NEXT: %[[M1:const_mat[0-9]?]] = add i32 %[[C2]], 4
-; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to i8*
-; CHECK-NEXT: %3 = load volatile i8, i8* %2
+; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to ptr
+; CHECK-NEXT: %3 = load volatile i8, ptr %2
; CHECK-NEXT: %[[M2:const_mat[0-9]?]] = add i32 %[[C2]], 31
-; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to i8*
-; CHECK-NEXT: %5 = load volatile i8, i8* %4
-; CHECK-NEXT: %6 = inttoptr i32 %[[C1]] to i8*
-; CHECK-NEXT: %7 = load volatile i8, i8* %6
+; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to ptr
+; CHECK-NEXT: %5 = load volatile i8, ptr %4
+; CHECK-NEXT: %6 = inttoptr i32 %[[C1]] to ptr
+; CHECK-NEXT: %7 = load volatile i8, ptr %6
; CHECK-NEXT: %[[M3:const_mat[0-9]?]] = add i32 %[[C1]], 7
-; CHECK-NEXT: %8 = inttoptr i32 %[[M3]] to i8*
-; CHECK-NEXT: %9 = load volatile i8, i8* %8
-; CHECK-NEXT: %10 = inttoptr i32 %[[C4]] to i8*
-; CHECK-NEXT: store i8 %9, i8* %10
+; CHECK-NEXT: %8 = inttoptr i32 %[[M3]] to ptr
+; CHECK-NEXT: %9 = load volatile i8, ptr %8
+; CHECK-NEXT: %10 = inttoptr i32 %[[C4]] to ptr
+; CHECK-NEXT: store i8 %9, ptr %10
; CHECK-NEXT: %[[M4:const_mat[0-9]?]] = add i32 %[[C4]], 31
-; CHECK-NEXT: %11 = inttoptr i32 %[[M4]] to i8*
-; CHECK-NEXT: store i8 %7, i8* %11
-; CHECK-NEXT: %12 = inttoptr i32 %[[C3]] to i8*
-; CHECK-NEXT: store i8 %5, i8* %12
+; CHECK-NEXT: %11 = inttoptr i32 %[[M4]] to ptr
+; CHECK-NEXT: store i8 %7, ptr %11
+; CHECK-NEXT: %12 = inttoptr i32 %[[C3]] to ptr
+; CHECK-NEXT: store i8 %5, ptr %12
; CHECK-NEXT: %[[M5:const_mat[0-9]?]] = add i32 %[[C3]], 7
-; CHECK-NEXT: %13 = inttoptr i32 %[[M5]] to i8*
-; CHECK-NEXT: store i8 %3, i8* %13
+; CHECK-NEXT: %13 = inttoptr i32 %[[M5]] to ptr
+; CHECK-NEXT: store i8 %3, ptr %13
; CHECK-NEXT: %[[M6:const_mat[0-9]?]] = add i32 %[[C1]], 80
-; CHECK-NEXT: %14 = inttoptr i32 %[[M6]] to i8*
-; CHECK-NEXT: store i8* %14, i8** @goo
+; CHECK-NEXT: %14 = inttoptr i32 %[[M6]] to ptr
+; CHECK-NEXT: store ptr %14, ptr @goo
-@goo = global i8* undef
+@goo = global ptr undef
;; Volatile i8 accesses at absolute addresses clustered around two bases
;; (loads near 805874688, stores near 805873688). The checks above expect
;; each base to be materialized once and the neighbors rematerialized via
;; small adds (+4, +31, +7, +80).
define void @foo_i8() {
entry:
- %0 = load volatile i8, i8* inttoptr (i32 805874688 to i8*)
- %1 = load volatile i8, i8* inttoptr (i32 805874692 to i8*)
- %2 = load volatile i8, i8* inttoptr (i32 805874719 to i8*)
- %3 = load volatile i8, i8* inttoptr (i32 805874720 to i8*)
- %4 = load volatile i8, i8* inttoptr (i32 805874727 to i8*)
- store i8 %4, i8* inttoptr(i32 805873688 to i8*)
- store i8 %3, i8* inttoptr(i32 805873719 to i8*)
- store i8 %2, i8* inttoptr(i32 805873720 to i8*)
- store i8 %1, i8* inttoptr(i32 805873727 to i8*)
- store i8* inttoptr(i32 805874800 to i8*), i8** @goo
+ %0 = load volatile i8, ptr inttoptr (i32 805874688 to ptr)
+ %1 = load volatile i8, ptr inttoptr (i32 805874692 to ptr)
+ %2 = load volatile i8, ptr inttoptr (i32 805874719 to ptr)
+ %3 = load volatile i8, ptr inttoptr (i32 805874720 to ptr)
+ %4 = load volatile i8, ptr inttoptr (i32 805874727 to ptr)
+ store i8 %4, ptr inttoptr(i32 805873688 to ptr)
+ store i8 %3, ptr inttoptr(i32 805873719 to ptr)
+ store i8 %2, ptr inttoptr(i32 805873720 to ptr)
+ store i8 %1, ptr inttoptr(i32 805873727 to ptr)
+ store ptr inttoptr(i32 805874800 to ptr), ptr @goo
ret void
}
; CHECK: foo_i16
; CHECK-DAG: %[[C1:const[0-9]?]] = bitcast i32 805874752 to i32
; CHECK-DAG: %[[C2:const[0-9]?]] = bitcast i32 805874688 to i32
-; CHECK: %0 = inttoptr i32 %[[C2]] to i16*
-; CHECK-NEXT: %1 = load volatile i16, i16* %0, align 2
+; CHECK: %0 = inttoptr i32 %[[C2]] to ptr
+; CHECK-NEXT: %1 = load volatile i16, ptr %0, align 2
; CHECK-NEXT: %[[M1:const_mat[0-9]?]] = add i32 %[[C2]], 4
-; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to i16*
-; CHECK-NEXT: %3 = load volatile i16, i16* %2, align 2
+; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to ptr
+; CHECK-NEXT: %3 = load volatile i16, ptr %2, align 2
; CHECK-NEXT: %[[M2:const_mat[0-9]?]] = add i32 %[[C2]], 32
-; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to i16*
-; CHECK-NEXT: %5 = load volatile i16, i16* %4, align 2
+; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to ptr
+; CHECK-NEXT: %5 = load volatile i16, ptr %4, align 2
; CHECK-NEXT: %[[M3:const_mat[0-9]?]] = add i32 %[[C2]], 62
-; CHECK-NEXT: %6 = inttoptr i32 %[[M3]] to i16*
-; CHECK-NEXT: %7 = load volatile i16, i16* %6, align 2
-; CHECK-NEXT: %8 = inttoptr i32 %[[C1]] to i16*
-; CHECK-NEXT: %9 = load volatile i16, i16* %8, align 2
+; CHECK-NEXT: %6 = inttoptr i32 %[[M3]] to ptr
+; CHECK-NEXT: %7 = load volatile i16, ptr %6, align 2
+; CHECK-NEXT: %8 = inttoptr i32 %[[C1]] to ptr
+; CHECK-NEXT: %9 = load volatile i16, ptr %8, align 2
; CHECK-NEXT: %[[M4:const_mat[0-9]?]] = add i32 %[[C1]], 22
-; CHECK-NEXT: %10 = inttoptr i32 %[[M4]] to i16*
-; CHECK-NEXT: %11 = load volatile i16, i16* %10, align 2
+; CHECK-NEXT: %10 = inttoptr i32 %[[M4]] to ptr
+; CHECK-NEXT: %11 = load volatile i16, ptr %10, align 2
;; Same pattern as foo_i8 but with i16 accesses: two base constants
;; (805874688 and 805874752), neighbors reached via adds of 4/32/62 and 22
;; per the check lines above.
define void @foo_i16() {
entry:
- %0 = load volatile i16, i16* inttoptr (i32 805874688 to i16*), align 2
- %1 = load volatile i16, i16* inttoptr (i32 805874692 to i16*), align 2
- %2 = load volatile i16, i16* inttoptr (i32 805874720 to i16*), align 2
- %3 = load volatile i16, i16* inttoptr (i32 805874750 to i16*), align 2
- %4 = load volatile i16, i16* inttoptr (i32 805874752 to i16*), align 2
- %5 = load volatile i16, i16* inttoptr (i32 805874774 to i16*), align 2
+ %0 = load volatile i16, ptr inttoptr (i32 805874688 to ptr), align 2
+ %1 = load volatile i16, ptr inttoptr (i32 805874692 to ptr), align 2
+ %2 = load volatile i16, ptr inttoptr (i32 805874720 to ptr), align 2
+ %3 = load volatile i16, ptr inttoptr (i32 805874750 to ptr), align 2
+ %4 = load volatile i16, ptr inttoptr (i32 805874752 to ptr), align 2
+ %5 = load volatile i16, ptr inttoptr (i32 805874774 to ptr), align 2
ret void
}
; CHECK: foo_i32
; CHECK-DAG: %[[C1:const[0-9]?]] = bitcast i32 805874816 to i32
; CHECK-DAG: %[[C2:const[0-9]?]] = bitcast i32 805874688 to i32
-; CHECK: %0 = inttoptr i32 %[[C2]] to i32*
-; CHECK-NEXT: %1 = load volatile i32, i32* %0, align 4
+; CHECK: %0 = inttoptr i32 %[[C2]] to ptr
+; CHECK-NEXT: %1 = load volatile i32, ptr %0, align 4
; CHECK-NEXT: %[[M1:const_mat[0-9]?]] = add i32 %[[C2]], 4
-; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to i32*
-; CHECK-NEXT: %3 = load volatile i32, i32* %2, align 4
+; CHECK-NEXT: %2 = inttoptr i32 %[[M1]] to ptr
+; CHECK-NEXT: %3 = load volatile i32, ptr %2, align 4
; CHECK-NEXT: %[[M2:const_mat[0-9]?]] = add i32 %[[C2]], 124
-; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to i32*
-; CHECK-NEXT: %5 = load volatile i32, i32* %4, align 4
-; CHECK-NEXT: %6 = inttoptr i32 %[[C1]] to i32*
-; CHECK-NEXT: %7 = load volatile i32, i32* %6, align 4
+; CHECK-NEXT: %4 = inttoptr i32 %[[M2]] to ptr
+; CHECK-NEXT: %5 = load volatile i32, ptr %4, align 4
+; CHECK-NEXT: %6 = inttoptr i32 %[[C1]] to ptr
+; CHECK-NEXT: %7 = load volatile i32, ptr %6, align 4
; CHECK-NEXT: %[[M3:const_mat[0-9]?]] = add i32 %[[C1]], 8
-; CHECK-NEXT: %8 = inttoptr i32 %[[M3]] to i32*
-; CHECK-NEXT: %9 = load volatile i32, i32* %8, align 4
+; CHECK-NEXT: %8 = inttoptr i32 %[[M3]] to ptr
+; CHECK-NEXT: %9 = load volatile i32, ptr %8, align 4
; CHECK-NEXT: %[[M4:const_mat[0-9]?]] = add i32 %[[C1]], 12
-; CHECK-NEXT: %10 = inttoptr i32 %[[M4]] to i32*
-; CHECK-NEXT: %11 = load volatile i32, i32* %10, align 4
+; CHECK-NEXT: %10 = inttoptr i32 %[[M4]] to ptr
+; CHECK-NEXT: %11 = load volatile i32, ptr %10, align 4
;; Same pattern with i32 accesses: bases 805874688 and 805874816, neighbors
;; reached via adds of 4/124 and 8/12 per the check lines above.
define void @foo_i32() {
entry:
- %0 = load volatile i32, i32* inttoptr (i32 805874688 to i32*), align 4
- %1 = load volatile i32, i32* inttoptr (i32 805874692 to i32*), align 4
- %2 = load volatile i32, i32* inttoptr (i32 805874812 to i32*), align 4
- %3 = load volatile i32, i32* inttoptr (i32 805874816 to i32*), align 4
- %4 = load volatile i32, i32* inttoptr (i32 805874824 to i32*), align 4
- %5 = load volatile i32, i32* inttoptr (i32 805874828 to i32*), align 4
+ %0 = load volatile i32, ptr inttoptr (i32 805874688 to ptr), align 4
+ %1 = load volatile i32, ptr inttoptr (i32 805874692 to ptr), align 4
+ %2 = load volatile i32, ptr inttoptr (i32 805874812 to ptr), align 4
+ %3 = load volatile i32, ptr inttoptr (i32 805874816 to ptr), align 4
+ %4 = load volatile i32, ptr inttoptr (i32 805874824 to ptr), align 4
+ %5 = load volatile i32, ptr inttoptr (i32 805874828 to ptr), align 4
ret void
}
target triple = "thumbv6m-none--musleabi"
; Check that for the same offset from the base constant, different types are materialized separately.
-; CHECK: %const = bitcast %5** getelementptr inbounds (%0, %0* @global, i32 0, i32 2, i32 0) to %5**
-; CHECK: %tmp = load %5*, %5** %const, align 4
-; CHECK: %base_bitcast = bitcast %5** %const to i8*
-; CHECK: %mat_gep = getelementptr i8, i8* %base_bitcast, i32 0
-; CHECK: %mat_bitcast = bitcast i8* %mat_gep to %4*
-; CHECK: tail call void undef(%5* nonnull %tmp, %4* %mat_bitcast)
-
-%0 = type { [16 x %1], %2, %4, [16 x %5], %6, %7, i32, [4 x i32], [8 x %3], i8, i8, i8, i8, i8, i8, i8, %8, %11, %11*, i32, i16, i8, i8, i8, i8, i8, i8, [15 x i16], i8, i8, [23 x %12], i8, i8*, i8, %13, i8, i8 }
+; CHECK: %const = bitcast ptr getelementptr inbounds (%0, ptr @global, i32 0, i32 2, i32 0) to ptr
+; CHECK: %tmp = load ptr, ptr %const, align 4
+; CHECK: tail call void undef(ptr nonnull %tmp, ptr %const)
+
+%0 = type { [16 x %1], %2, %4, [16 x %5], %6, %7, i32, [4 x i32], [8 x %3], i8, i8, i8, i8, i8, i8, i8, %8, %11, ptr, i32, i16, i8, i8, i8, i8, i8, i8, [15 x i16], i8, i8, [23 x %12], i8, ptr, i8, %13, i8, i8 }
%1 = type { i32, i32, i8, i8, i8, i8, i8, i8, i8, i8 }
-%2 = type { %3*, i16, i16, i16 }
+%2 = type { ptr, i16, i16, i16 }
%3 = type { [4 x i32] }
-%4 = type { %5*, %5*, i8 }
-%5 = type { [4 x i32], i8*, i8, i8 }
+%4 = type { ptr, ptr, i8 }
+%5 = type { [4 x i32], ptr, i8, i8 }
%6 = type { i8, [4 x i32] }
%7 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%8 = type { [16 x %9], %9*, %9*, %9*, %9*, %11, %11, %11, i8, i8, i8, i8 }
-%9 = type { %1, %11, %11, %9*, %9*, %10, i8, i8, i8, i8 }
+%8 = type { [16 x %9], ptr, ptr, ptr, ptr, %11, %11, %11, i8, i8, i8, i8 }
+%9 = type { %1, %11, %11, ptr, ptr, %10, i8, i8, i8, i8 }
%10 = type { i32, i16 }
-%11 = type { %11*, %11* }
+%11 = type { ptr, ptr }
%12 = type { i8, i16, i32 }
%13 = type { i32, i32, i8 }
br i1 undef, label %bb2, label %bb1
bb1: ; preds = %bb
- %tmp = load %5*, %5** getelementptr inbounds (%0, %0* @global, i32 0, i32 2, i32 0), align 4
- tail call void undef(%5* nonnull %tmp, %4* getelementptr inbounds (%0, %0* @global, i32 0, i32 2))
+ %tmp = load ptr, ptr getelementptr inbounds (%0, ptr @global, i32 0, i32 2, i32 0), align 4
+ tail call void undef(ptr nonnull %tmp, ptr getelementptr inbounds (%0, ptr @global, i32 0, i32 2))
unreachable
bb2: ; preds = %bb
define i32 @test1() nounwind {
; CHECK-LABEL: @test1
; CHECK: %const = bitcast i32 12345678 to i32
-; CHECK: %1 = inttoptr i32 %const to %T*
-; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
- %addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
- %tmp1 = load i32, i32* %addr1
- %addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
- %tmp2 = load i32, i32* %addr2
- %addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
- %tmp3 = load i32, i32* %addr3
+; CHECK: %1 = inttoptr i32 %const to ptr
+; CHECK: %addr1 = getelementptr %T, ptr %1, i32 0, i32 1
+ %addr1 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 1
+ %tmp1 = load i32, ptr %addr1
+ %addr2 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 2
+ %tmp2 = load i32, ptr %addr2
+ %addr3 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 3
+ %tmp3 = load i32, ptr %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
if.end167:
; CHECK: and i32 {{.*}}, 32768
%shl161 = shl nuw nsw i32 %conv121, 15
- %0 = load i8, i8* undef, align 1
+ %0 = load i8, ptr undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32768
if.end167:
; CHECK: add i32 {{.*}}, -32758
%shl161 = shl nuw nsw i32 %conv121, 15
- %0 = load i8, i8* undef, align 1
+ %0 = load i8, ptr undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32773
; We don't want to convert constant divides because the benefit from converting
; them to a mul in the backend is larger than constant materialization savings.
-define void @signed_const_division(i64 %in1, i64 %in2, i64* %addr) {
+define void @signed_const_division(i64 %in1, i64 %in2, ptr %addr) {
; CHECK-LABEL: @signed_const_division(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: [[L1:%.*]] = phi i64 [ [[RES1:%.*]], [[LOOP]] ], [ [[IN1:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[L2:%.*]] = phi i64 [ [[RES2:%.*]], [[LOOP]] ], [ [[IN2:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[RES1]] = sdiv i64 [[L1]], 4294967296
-; CHECK-NEXT: store volatile i64 [[RES1]], i64* [[ADDR:%.*]]
+; CHECK-NEXT: store volatile i64 [[RES1]], ptr [[ADDR:%.*]]
; CHECK-NEXT: [[RES2]] = srem i64 [[L2]], 4294967296
-; CHECK-NEXT: store volatile i64 [[RES2]], i64* [[ADDR]]
+; CHECK-NEXT: store volatile i64 [[RES2]], ptr [[ADDR]]
; CHECK-NEXT: [[AGAIN:%.*]] = icmp eq i64 [[RES1]], [[RES2]]
; CHECK-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[END:%.*]]
; CHECK: end:
%l1 = phi i64 [%res1, %loop], [%in1, %entry]
%l2 = phi i64 [%res2, %loop], [%in2, %entry]
%res1 = sdiv i64 %l1, 4294967296
- store volatile i64 %res1, i64* %addr
+ store volatile i64 %res1, ptr %addr
%res2 = srem i64 %l2, 4294967296
- store volatile i64 %res2, i64* %addr
+ store volatile i64 %res2, ptr %addr
%again = icmp eq i64 %res1, %res2
br i1 %again, label %loop, label %end
ret void
}
-define void @unsigned_const_division(i64 %in1, i64 %in2, i64* %addr) {
+define void @unsigned_const_division(i64 %in1, i64 %in2, ptr %addr) {
; CHECK-LABEL: @unsigned_const_division(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: [[L1:%.*]] = phi i64 [ [[RES1:%.*]], [[LOOP]] ], [ [[IN1:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[L2:%.*]] = phi i64 [ [[RES2:%.*]], [[LOOP]] ], [ [[IN2:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[RES1]] = udiv i64 [[L1]], 4294967296
-; CHECK-NEXT: store volatile i64 [[RES1]], i64* [[ADDR:%.*]]
+; CHECK-NEXT: store volatile i64 [[RES1]], ptr [[ADDR:%.*]]
; CHECK-NEXT: [[RES2]] = urem i64 [[L2]], 4294967296
-; CHECK-NEXT: store volatile i64 [[RES2]], i64* [[ADDR]]
+; CHECK-NEXT: store volatile i64 [[RES2]], ptr [[ADDR]]
; CHECK-NEXT: [[AGAIN:%.*]] = icmp eq i64 [[RES1]], [[RES2]]
; CHECK-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[END:%.*]]
; CHECK: end:
%l1 = phi i64 [%res1, %loop], [%in1, %entry]
%l2 = phi i64 [%res2, %loop], [%in2, %entry]
%res1 = udiv i64 %l1, 4294967296
- store volatile i64 %res1, i64* %addr
+ store volatile i64 %res1, ptr %addr
%res2 = urem i64 %l2, 4294967296
- store volatile i64 %res2, i64* %addr
+ store volatile i64 %res2, ptr %addr
%again = icmp eq i64 %res1, %res2
br i1 %again, label %loop, label %end
; CHECK-LABEL: @PR40934(
; CHECK-NEXT: ret i32 undef
; CHECK: bb:
-; CHECK-NEXT: [[T2:%.*]] = call i32 (i64, ...) bitcast (i32 (...)* @d to i32 (i64, ...)*)(i64 7788015061)
+; CHECK-NEXT: [[T2:%.*]] = call i32 (i64, ...) @d(i64 7788015061)
; CHECK-NEXT: [[T3:%.*]] = and i64 [[T3]], 7788015061
; CHECK-NEXT: br label [[BB:%.*]]
;
ret i32 undef
bb:
- %t2 = call i32 (i64, ...) bitcast (i32 (...)* @d to i32 (i64, ...)*)(i64 7788015061)
+ %t2 = call i32 (i64, ...) @d(i64 7788015061)
%t3 = and i64 %t3, 7788015061
br label %bb
}
; CHECK: bb2:
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 (i64, i64, ...) bitcast (i32 (...)* @c to i32 (i64, i64, ...)*)(i64 4208870971, i64 4208870971)
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 (i64, i64, ...) @c(i64 4208870971, i64 4208870971)
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb5:
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP]], align 4
; CHECK-NEXT: ret i32 [[TMP6]]
;
bb:
br label %bb2
bb3: ; No predecessors!
- %tmp4 = call i32 (i64, i64, ...) bitcast (i32 (...)* @c to i32 (i64, i64, ...)*)(i64 4208870971, i64 4208870971)
+ %tmp4 = call i32 (i64, i64, ...) @c(i64 4208870971, i64 4208870971)
br label %bb1
bb5: ; No predecessors!
- %tmp6 = load i32, i32* %tmp, align 4
+ %tmp6 = load i32, ptr %tmp, align 4
ret i32 %tmp6
}
define i32 @cast_inst_test() {
; CHECK-LABEL: @cast_inst_test
; CHECK: %const = bitcast i64 4646526064 to i64
-; CHECK: %1 = inttoptr i64 %const to i32*
-; CHECK: %v0 = load i32, i32* %1, align 16
+; CHECK: %1 = inttoptr i64 %const to ptr
+; CHECK: %v0 = load i32, ptr %1, align 16
; CHECK: %const_mat = add i64 %const, 16
-; CHECK-NEXT: %2 = inttoptr i64 %const_mat to i32*
-; CHECK-NEXT: %v1 = load i32, i32* %2, align 16
+; CHECK-NEXT: %2 = inttoptr i64 %const_mat to ptr
+; CHECK-NEXT: %v1 = load i32, ptr %2, align 16
; CHECK: %const_mat1 = add i64 %const, 32
-; CHECK-NEXT: %3 = inttoptr i64 %const_mat1 to i32*
-; CHECK-NEXT: %v2 = load i32, i32* %3, align 16
- %a0 = inttoptr i64 4646526064 to i32*
- %v0 = load i32, i32* %a0, align 16
- %a1 = inttoptr i64 4646526080 to i32*
- %v1 = load i32, i32* %a1, align 16
- %a2 = inttoptr i64 4646526096 to i32*
- %v2 = load i32, i32* %a2, align 16
+; CHECK-NEXT: %3 = inttoptr i64 %const_mat1 to ptr
+; CHECK-NEXT: %v2 = load i32, ptr %3, align 16
+ %a0 = inttoptr i64 4646526064 to ptr
+ %v0 = load i32, ptr %a0, align 16
+ %a1 = inttoptr i64 4646526080 to ptr
+ %v1 = load i32, ptr %a1, align 16
+ %a2 = inttoptr i64 4646526096 to ptr
+ %v2 = load i32, ptr %a2, align 16
%r0 = add i32 %v0, %v1
%r1 = add i32 %r0, %v2
ret i32 %r1
define i32 @test1() nounwind {
; CHECK-LABEL: @test1
; CHECK: %const = bitcast i32 12345678 to i32
-; CHECK: %1 = inttoptr i32 %const to %T*
-; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
- %addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
- %tmp1 = load i32, i32* %addr1
- %addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
- %tmp2 = load i32, i32* %addr2
- %addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
- %tmp3 = load i32, i32* %addr3
+; CHECK: %1 = inttoptr i32 %const to ptr
+; CHECK: %addr1 = getelementptr %T, ptr %1, i32 0, i32 1
+ %addr1 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 1
+ %tmp1 = load i32, ptr %addr1
+ %addr2 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 2
+ %tmp2 = load i32, ptr %addr2
+ %addr3 = getelementptr %T, ptr inttoptr (i32 12345678 to ptr), i32 0, i32 3
+ %tmp3 = load i32, ptr %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
; CHECK-NOT: !dbg !11
; CHECK: inttoptr
entry:
- %a0 = inttoptr i64 4646526064 to i32*
- %v0 = load i32, i32* %a0, align 16, !dbg !11
+ %a0 = inttoptr i64 4646526064 to ptr
+ %v0 = load i32, ptr %a0, align 16, !dbg !11
%c = alloca i32, align 4
- store i32 1, i32* %c, align 4
- %0 = load i32, i32* %c, align 4
+ store i32 1, ptr %c, align 4
+ %0 = load i32, ptr %c, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %a1 = inttoptr i64 4646526080 to i32*
- %v1 = load i32, i32* %a1, align 16, !dbg !11
+ %a1 = inttoptr i64 4646526080 to ptr
+ %v1 = load i32, ptr %a1, align 16, !dbg !11
br label %return
if.else: ; preds = %entry
- %a2 = inttoptr i64 4646526096 to i32*
- %v2 = load i32, i32* %a2, align 16, !dbg !11
+ %a2 = inttoptr i64 4646526096 to ptr
+ %v2 = load i32, ptr %a2, align 16, !dbg !11
br label %return
return: ; preds = %if.else, %if.then
; CHECK: bitcast
; CHECK: !dbg !11
; CHECK: inttoptr
- %a0 = inttoptr i64 4646526064 to i32*, !dbg !11
- %v0 = load i32, i32* %a0, align 16, !dbg !11
+ %a0 = inttoptr i64 4646526064 to ptr, !dbg !11
+ %v0 = load i32, ptr %a0, align 16, !dbg !11
- %a1 = inttoptr i64 4646526080 to i32*
- %v1 = load i32, i32* %a1, align 16, !dbg !11
+ %a1 = inttoptr i64 4646526080 to ptr
+ %v1 = load i32, ptr %a1, align 16, !dbg !11
- %a2 = inttoptr i64 4646526096 to i32*
- %v2 = load i32, i32* %a2, align 16, !dbg !11
+ %a2 = inttoptr i64 4646526096 to ptr
+ %v2 = load i32, ptr %a2, align 16, !dbg !11
%r0 = add i32 %v0, %v1
%r1 = add i32 %r0, %v2
define i32 @test1() nounwind {
; CHECK-LABEL: @test1
; CHECK: %const = bitcast i32 12345678 to i32
-; CHECK-NOT: %base = inttoptr i32 12345678 to %T*
-; CHECK-NEXT: %1 = inttoptr i32 %const to %T*
-; CHECK-NEXT: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
-; CHECK-NEXT: %addr2 = getelementptr %T, %T* %1, i32 0, i32 2
-; CHECK-NEXT: %addr3 = getelementptr %T, %T* %1, i32 0, i32 3
- %base = inttoptr i32 12345678 to %T*
- %addr1 = getelementptr %T, %T* %base, i32 0, i32 1
- %addr2 = getelementptr %T, %T* %base, i32 0, i32 2
- %addr3 = getelementptr %T, %T* %base, i32 0, i32 3
+; CHECK-NOT: %base = inttoptr i32 12345678 to ptr
+; CHECK-NEXT: %1 = inttoptr i32 %const to ptr
+; CHECK-NEXT: %addr1 = getelementptr %T, ptr %1, i32 0, i32 1
+; CHECK-NEXT: %addr2 = getelementptr %T, ptr %1, i32 0, i32 2
+; CHECK-NEXT: %addr3 = getelementptr %T, ptr %1, i32 0, i32 3
+ %base = inttoptr i32 12345678 to ptr
+ %addr1 = getelementptr %T, ptr %base, i32 0, i32 1
+ %addr2 = getelementptr %T, ptr %base, i32 0, i32 2
+ %addr3 = getelementptr %T, ptr %base, i32 0, i32 3
ret i32 12345678
}
; BFIHOIST: br label %endif
; Function Attrs: norecurse
-define i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
%call = tail call i64 @fn(i64 0)
%call1 = tail call i64 @fn(i64 1)
%tobool = icmp eq i32 %argc, 0
%3 = catchswitch within none [label %4] unwind to caller
; <label>:4: ; preds = %catch.dispatch
- %5 = catchpad within %3 [i8* null, i32 64, i8* null]
+ %5 = catchpad within %3 [ptr null, i32 64, ptr null]
br i1 %tobool, label %then, label %else
then:
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
-define internal fastcc void @baz(i8* %arg) unnamed_addr personality i8* bitcast (i32 (...)* @wobble to i8*) {
+define internal fastcc void @baz(ptr %arg) unnamed_addr personality ptr @wobble {
; CHECK-LABEL: @baz
bb:
- %tmp = invoke noalias dereferenceable(40) i8* @wibble.2(i64 40)
+ %tmp = invoke noalias dereferenceable(40) ptr @wibble.2(i64 40)
to label %bb6 unwind label %bb1
bb1: ; preds = %bb
%tmp2 = catchswitch within none [label %bb3] unwind label %bb16
bb3: ; preds = %bb1
- %tmp4 = catchpad within %tmp2 [i8* null, i32 64, i8* null]
- invoke void @spam(i8* null) [ "funclet"(token %tmp4) ]
+ %tmp4 = catchpad within %tmp2 [ptr null, i32 64, ptr null]
+ invoke void @spam(ptr null) [ "funclet"(token %tmp4) ]
to label %bb5 unwind label %bb16
bb5: ; preds = %bb3
unreachable
bb6: ; preds = %bb
- %tmp7 = icmp eq i8* %arg, null
+ %tmp7 = icmp eq ptr %arg, null
br label %bb9
bb9: ; preds = %bb8, %bb6
- %tmp10 = inttoptr i64 -6148914691236517376 to i16*
- %tmp11 = invoke noalias dereferenceable(40) i8* @wibble.2(i64 40)
+ %tmp10 = inttoptr i64 -6148914691236517376 to ptr
+ %tmp11 = invoke noalias dereferenceable(40) ptr @wibble.2(i64 40)
to label %bb15 unwind label %bb12
bb12: ; preds = %bb9
ret void
bb16: ; preds = %bb14, %bb3, %bb1
- %tmp17 = phi i16* [ inttoptr (i64 -6148914691236517376 to i16*), %bb1 ], [ inttoptr (i64 -6148914691236517376 to i16*), %bb3 ], [ %tmp10, %bb14 ]
+ %tmp17 = phi ptr [ inttoptr (i64 -6148914691236517376 to ptr), %bb1 ], [ inttoptr (i64 -6148914691236517376 to ptr), %bb3 ], [ %tmp10, %bb14 ]
%tmp18 = cleanuppad within none []
br label %bb19
cleanupret from %tmp18 unwind to caller
}
-declare i8* @wibble.2(i64)
+declare ptr @wibble.2(i64)
-declare dso_local void @spam(i8*) local_unnamed_addr
+declare dso_local void @spam(ptr) local_unnamed_addr
declare i32 @wobble(...)
target triple = "x86_64-apple-macosx10.9.0"
; PR18626
-define i8* @test1(i1 %cmp, i64* %tmp) {
+define ptr @test1(i1 %cmp, ptr %tmp) {
entry:
- call void @foo(i8* inttoptr (i64 68719476735 to i8*))
+ call void @foo(ptr inttoptr (i64 68719476735 to ptr))
br i1 %cmp, label %if.end, label %return
if.end: ; preds = %bb1
- call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ call void @foo(ptr inttoptr (i64 68719476736 to ptr))
br label %return
return:
- %retval.0 = phi i8* [ null, %entry ], [ inttoptr (i64 68719476736 to i8*), %if.end ]
- store i64 1172321806, i64* %tmp
- ret i8* %retval.0
+ %retval.0 = phi ptr [ null, %entry ], [ inttoptr (i64 68719476736 to ptr), %if.end ]
+ store i64 1172321806, ptr %tmp
+ ret ptr %retval.0
; CHECK-LABEL: @test1
; CHECK: if.end:
-; CHECK: %2 = inttoptr i64 %const to i8*
+; CHECK: %2 = inttoptr i64 %const to ptr
; CHECK-NEXT: br
; CHECK: return:
-; CHECK-NEXT: %retval.0 = phi i8* [ null, %entry ], [ %2, %if.end ]
+; CHECK-NEXT: %retval.0 = phi ptr [ null, %entry ], [ %2, %if.end ]
}
-define void @test2(i1 %cmp, i64** %tmp) {
+define void @test2(i1 %cmp, ptr %tmp) {
entry:
- call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ call void @foo(ptr inttoptr (i64 68719476736 to ptr))
br i1 %cmp, label %if.end, label %return
if.end: ; preds = %bb1
- call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ call void @foo(ptr inttoptr (i64 68719476736 to ptr))
br label %return
return:
- store i64* inttoptr (i64 68719476735 to i64*), i64** %tmp
+ store ptr inttoptr (i64 68719476735 to ptr), ptr %tmp
ret void
; CHECK-LABEL: @test2
; CHECK: return:
; CHECK-NEXT: %const_mat = add i64 %const, -1
-; CHECK-NEXT: inttoptr i64 %const_mat to i64*
+; CHECK-NEXT: inttoptr i64 %const_mat to ptr
}
-declare void @foo(i8*)
+declare void @foo(ptr)
; PR18768
define i32 @test3(i1 %c) {
br label %if.end3
if.end3: ; preds = %if.then, %entry
- %d.0 = phi i32* [ inttoptr (i64 985162435264511 to i32*), %entry ], [ null, %if.then ]
- %cmp4 = icmp eq i32* %d.0, inttoptr (i64 985162435264511 to i32*)
- %cmp6 = icmp eq i32* %d.0, inttoptr (i64 985162418487296 to i32*)
+ %d.0 = phi ptr [ inttoptr (i64 985162435264511 to ptr), %entry ], [ null, %if.then ]
+ %cmp4 = icmp eq ptr %d.0, inttoptr (i64 985162435264511 to ptr)
+ %cmp6 = icmp eq ptr %d.0, inttoptr (i64 985162418487296 to ptr)
%or = or i1 %cmp4, %cmp6
br i1 %or, label %if.then8, label %if.end9
define i64 @switch_test2(i64 %a) {
; CHECK-LABEL: @switch_test2
-; CHECK: %2 = phi i64* [ %1, %case2 ], [ %0, %Entry ], [ %0, %Entry ]
+; CHECK: %2 = phi ptr [ %1, %case2 ], [ %0, %Entry ], [ %0, %Entry ]
Entry:
%sel = add i64 %a, 4519019440
switch i64 %sel, label %fail [
br label %continuation
continuation:
- %0 = phi i64* [ inttoptr(i64 4519019440 to i64*), %case2 ], [ inttoptr(i64 4519019460 to i64*), %Entry ], [ inttoptr(i64 4519019460 to i64*), %Entry ]
+ %0 = phi ptr [ inttoptr(i64 4519019440 to ptr), %case2 ], [ inttoptr(i64 4519019460 to ptr), %Entry ], [ inttoptr(i64 4519019460 to ptr), %Entry ]
ret i64 0;
fail:
; CHECK: land.rhs:
; CHECK-NEXT: unreachable
; CHECK: land.end:
-; CHECK-NEXT: [[CONST1:%.*]] = bitcast i16* getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 0) to i16*
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i16* undef, [[CONST1]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule ptr undef, @a
; CHECK-NEXT: unreachable
; CHECK: for.cond3:
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 1), align 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr getelementptr inbounds ([2 x i16], ptr @a, i32 0, i32 1), align 1
; CHECK-NEXT: br label [[FOR_COND3:%.*]]
; CHECK: for.end4:
-; CHECK-NEXT: [[CONST:%.*]] = bitcast i16* getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 0) to i16*
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONST]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @a, align 1
; CHECK-NEXT: ret void
;
for.cond:
unreachable
land.end: ; preds = %for.body2
- %cmp = icmp ule i16* undef, getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 0)
+ %cmp = icmp ule ptr undef, @a
unreachable
for.cond3: ; preds = %for.cond3
- %tmp0 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 1), align 1
+ %tmp0 = load i16, ptr getelementptr inbounds ([2 x i16], ptr @a, i32 0, i32 1), align 1
br label %for.cond3
for.end4: ; preds = %for.cond
- %tmp1 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @a, i32 0, i32 0), align 1
+ %tmp1 = load i16, ptr @a, align 1
ret void
}
; RUN: opt < %s -passes=constmerge > /dev/null
-@foo.upgrd.1 = internal constant { i32 } { i32 7 } ; <{ i32 }*> [#uses=1]
-@bar = internal constant { i32 } { i32 7 } ; <{ i32 }*> [#uses=1]
+@foo.upgrd.1 = internal constant { i32 } { i32 7 } ; <ptr> [#uses=1]
+@bar = internal constant { i32 } { i32 7 } ; <ptr> [#uses=1]
-declare i32 @test(i32*)
+declare i32 @test(ptr)
define void @foo() {
- call i32 @test( i32* getelementptr ({ i32 }, { i32 }* @foo.upgrd.1, i64 0, i32 0) ) ; <i32>:1 [#uses=0]
- call i32 @test( i32* getelementptr ({ i32 }, { i32 }* @bar, i64 0, i32 0) ) ; <i32>:2 [#uses=0]
+ call i32 @test( ptr @foo.upgrd.1 ) ; <i32>:1 [#uses=0]
+ call i32 @test( ptr @bar ) ; <i32>:2 [#uses=0]
ret void
}
; CHECK: @foo = constant i32 6
; CHECK: @bar = constant i32 6
-@foo = constant i32 6 ; <i32*> [#uses=0]
-@bar = constant i32 6 ; <i32*> [#uses=0]
+@foo = constant i32 6 ; <ptr> [#uses=0]
+@bar = constant i32 6 ; <ptr> [#uses=0]
; RUN: opt -passes=constmerge -S < %s | FileCheck %s
; PR8978
-declare i32 @zed(%struct.foobar*, %struct.foobar*)
+declare i32 @zed(ptr, ptr)
%struct.foobar = type { i32 }
; CHECK: bar.d
define i32 @main() nounwind ssp {
entry:
; CHECK: bar.d
- %call2 = tail call i32 @zed(%struct.foobar* @foo.d, %struct.foobar* @bar.d)
+ %call2 = tail call i32 @zed(ptr @foo.d, ptr @bar.d)
nounwind
ret i32 0
}
@T1B = internal unnamed_addr constant i32 1, align 2
; CHECK: @T1B = internal unnamed_addr constant i32 1, align 4
-define void @test1(i32** %P1, i32** %P2) {
- store i32* @T1A, i32** %P1
- store i32* @T1B, i32** %P2
+define void @test1(ptr %P1, ptr %P2) {
+ store ptr @T1A, ptr %P1
+ store ptr @T1B, ptr %P2
ret void
}
@T2B = internal unnamed_addr constant i32 2, align 2
; CHECK: @T2B = internal unnamed_addr constant i32 2, align 2
-define void @test2(i32** %P1, i32** %P2) {
- store i32* @T2A, i32** %P1
- store i32* @T2B, i32** %P2
+define void @test2(ptr %P1, ptr %P2) {
+ store ptr @T2A, ptr %P1
+ store ptr @T2B, ptr %P2
ret void
}
; CHECK: @T1G2
; CHECK: @T1G3
-define void @test1(i32** %P1, i32** %P2, i32** %P3) {
- store i32* @T1G1, i32** %P1
- store i32* @T1G2, i32** %P2
- store i32* @T1G3, i32** %P3
+define void @test1(ptr %P1, ptr %P2, ptr %P3) {
+ store ptr @T1G1, ptr %P1
+ store ptr @T1G2, ptr %P2
+ store ptr @T1G3, ptr %P3
ret void
}
; CHECK: @T2a
; CHECK: @T2b
-define void @test2(i32** %P1, i32 addrspace(30)** %P2) {
- store i32* @T2a, i32** %P1
- store i32 addrspace(30)* @T2b, i32 addrspace(30)** %P2
+define void @test2(ptr %P1, ptr %P2) {
+ store ptr @T2a, ptr %P1
+ store ptr addrspace(30) @T2b, ptr %P2
ret void
}
@T3A = internal constant i32 0
@T3B = internal constant i32 0
-@llvm.used = appending global [2 x i32*] [i32* @T3A, i32* @T3B], section
+@llvm.used = appending global [2 x ptr] [ptr @T3A, ptr @T3B], section
"llvm.metadata"
define void @test3() {
; CHECK: @T4D1
; CHECK: @T4D2
-define void @test4(i32** %P1, i32** %P2, i32** %P3, i32** %P4, i32** %P5, i32** %P6, i32** %P7, i32** %P8) {
- store i32* @T4A1, i32** %P1
- store i32* @T4A2, i32** %P2
- store i32* @T4B1, i32** %P3
- store i32* @T4B2, i32** %P4
- store i32* @T4C1, i32** %P5
- store i32* @T4C2, i32** %P6
- store i32* @T4D1, i32** %P7
- store i32* @T4D2, i32** %P8
+define void @test4(ptr %P1, ptr %P2, ptr %P3, ptr %P4, ptr %P5, ptr %P6, ptr %P7, ptr %P8) {
+ store ptr @T4A1, ptr %P1
+ store ptr @T4A2, ptr %P2
+ store ptr @T4B1, ptr %P3
+ store ptr @T4B2, ptr %P4
+ store ptr @T4C1, ptr %P5
+ store ptr @T4C2, ptr %P6
+ store ptr @T4D1, ptr %P7
+ store ptr @T4D2, ptr %P8
ret void
}
@T5tls = private thread_local constant i32 555
@T5ua = private unnamed_addr constant i32 555
-define void @test5(i32** %P1, i32** %P2) {
- store i32* @T5tls, i32** %P1
- store i32* @T5ua, i32** %P2
+define void @test5(ptr %P1, ptr %P2) {
+ store ptr @T5tls, ptr %P1
+ store ptr @T5ua, ptr %P2
ret void
}
; Test that in one run var3 is merged into var2 and var1 into var4.
; Test that we merge @var5 and @var6 into one with the higher alignment
-declare void @zed(%struct.foobar*, %struct.foobar*)
+declare void @zed(ptr, ptr)
%struct.foobar = type { i32 }
; CHECK: @var2 = constant %struct.foobar { i32 2 }
; CHECK-NEXT: @var4 = constant %struct.foobar { i32 2 }
-declare void @helper([16 x i8]*)
+declare void @helper(ptr)
@var5 = internal constant [16 x i8] c"foo1bar2foo3bar\00", align 16
@var6 = private unnamed_addr constant [16 x i8] c"foo1bar2foo3bar\00", align 1
@var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
; CHECK-NEXT: @var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
-@var4a = alias %struct.foobar, %struct.foobar* @var4
-@llvm.used = appending global [1 x %struct.foobar*] [%struct.foobar* @var4a], section "llvm.metadata"
+@var4a = alias %struct.foobar, ptr @var4
+@llvm.used = appending global [1 x ptr] [ptr @var4a], section "llvm.metadata"
define i32 @main() {
entry:
- call void @zed(%struct.foobar* @var1, %struct.foobar* @var2)
- call void @zed(%struct.foobar* @var3, %struct.foobar* @var4)
- call void @helper([16 x i8]* @var5)
- call void @helper([16 x i8]* @var6)
- call void @helper([16 x i8]* @var7)
- call void @helper([16 x i8]* @var8)
+ call void @zed(ptr @var1, ptr @var2)
+ call void @zed(ptr @var3, ptr @var4)
+ call void @helper(ptr @var5)
+ call void @helper(ptr @var6)
+ call void @helper(ptr @var7)
+ call void @helper(ptr @var8)
ret i32 0
}
@a = internal constant i32 1, !dbg !0
@b = unnamed_addr constant i32 1, !dbg !9
-define void @test1(i32** %P1, i32** %P2) {
- store i32* @a, i32** %P1
- store i32* @b, i32** %P2
+define void @test1(ptr %P1, ptr %P2) {
+ store ptr @a, ptr %P1
+ store ptr @b, ptr %P2
ret void
}
; Test which corresponding x and y are merged and that unnamed_addr
; is correctly set.
-declare void @zed(%struct.foobar*, %struct.foobar*)
+declare void @zed(ptr, ptr)
%struct.foobar = type { i32 }
; CHECK-NEXT: @test3.y = constant %struct.foobar { i32 3 }
; CHECK-NEXT: @test4.y = unnamed_addr constant %struct.foobar { i32 4 }
; CHECK-NOT: @
-; CHECK: declare void @zed(%struct.foobar*, %struct.foobar*)
+; CHECK: declare void @zed(ptr, ptr)
define i32 @main() {
entry:
- call void @zed(%struct.foobar* @test1.x, %struct.foobar* @test1.y)
- call void @zed(%struct.foobar* @test2.x, %struct.foobar* @test2.y)
- call void @zed(%struct.foobar* @test3.x, %struct.foobar* @test3.y)
- call void @zed(%struct.foobar* @test4.x, %struct.foobar* @test4.y)
+ call void @zed(ptr @test1.x, ptr @test1.y)
+ call void @zed(ptr @test2.x, ptr @test2.y)
+ call void @zed(ptr @test3.x, ptr @test3.y)
+ call void @zed(ptr @test4.x, ptr @test4.y)
ret i32 0
}
; RUN: opt -S -passes=cross-dso-cfi < %s | FileCheck %s
-; CHECK: define void @__cfi_check(i64 %[[TYPE:.*]], i8* %[[ADDR:.*]], i8* %[[DATA:.*]]) align 4096
+; CHECK: define void @__cfi_check(i64 %[[TYPE:.*]], ptr %[[ADDR:.*]], ptr %[[DATA:.*]]) align 4096
; CHECK: switch i64 %[[TYPE]], label %[[FAIL:.*]] [
; CHECK-NEXT: i64 111, label %[[L1:.*]]
; CHECK-NEXT: i64 222, label %[[L2:.*]]
; CHECK-NEXT: ret void
; CHECK: [[FAIL]]:
-; CHECK-NEXT: call void @__cfi_check_fail(i8* %[[DATA]], i8* %[[ADDR]])
+; CHECK-NEXT: call void @__cfi_check_fail(ptr %[[DATA]], ptr %[[ADDR]])
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[L1]]:
-; CHECK-NEXT: call i1 @llvm.type.test(i8* %[[ADDR]], metadata i64 111)
+; CHECK-NEXT: call i1 @llvm.type.test(ptr %[[ADDR]], metadata i64 111)
; CHECK-NEXT: br {{.*}} label %[[EXIT]], label %[[FAIL]]
; CHECK: [[L2]]:
-; CHECK-NEXT: call i1 @llvm.type.test(i8* %[[ADDR]], metadata i64 222)
+; CHECK-NEXT: call i1 @llvm.type.test(ptr %[[ADDR]], metadata i64 222)
; CHECK-NEXT: br {{.*}} label %[[EXIT]], label %[[FAIL]]
; CHECK: [[L3]]:
-; CHECK-NEXT: call i1 @llvm.type.test(i8* %[[ADDR]], metadata i64 333)
+; CHECK-NEXT: call i1 @llvm.type.test(ptr %[[ADDR]], metadata i64 333)
; CHECK-NEXT: br {{.*}} label %[[EXIT]], label %[[FAIL]]
; CHECK: [[L4]]:
-; CHECK-NEXT: call i1 @llvm.type.test(i8* %[[ADDR]], metadata i64 444)
+; CHECK-NEXT: call i1 @llvm.type.test(ptr %[[ADDR]], metadata i64 444)
; CHECK-NEXT: br {{.*}} label %[[EXIT]], label %[[FAIL]]
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
ret i32 5
}
-define weak_odr hidden void @__cfi_check_fail(i8*, i8*) {
+define weak_odr hidden void @__cfi_check_fail(ptr, ptr) {
entry:
ret void
}
; CHECK-NOT: < bb43 bb49 bb59 bb3 bb31 bb41 > [ 77, bb43 ]
bb:
%i = alloca [420 x i8], align 1
- %i2 = getelementptr inbounds [420 x i8], [420 x i8]* %i, i64 0, i64 390
+ %i2 = getelementptr inbounds [420 x i8], ptr %i, i64 0, i64 390
br label %bb3
bb3: ; preds = %bb59, %bb
- %i4 = phi i8* [ %i2, %bb ], [ %i60, %bb59 ]
+ %i4 = phi ptr [ %i2, %bb ], [ %i60, %bb59 ]
%i5 = phi i8 [ 77, %bb ], [ %i64, %bb59 ]
%i6 = phi i32 [ 2, %bb ], [ %i63, %bb59 ]
%i7 = phi i32 [ 26, %bb ], [ %i62, %bb59 ]
%i13 = mul nsw i32 %i12, 3
%i14 = add nsw i32 %i13, %i6
%i15 = sext i32 %i14 to i64
- %i16 = getelementptr inbounds i8, i8* %i4, i64 %i15
- %i17 = load i8, i8* %i16, align 1
+ %i16 = getelementptr inbounds i8, ptr %i4, i64 %i15
+ %i17 = load i8, ptr %i16, align 1
%i18 = icmp sgt i8 %i17, 0
br i1 %i18, label %bb21, label %bb31
br i1 true, label %bb59, label %bb43
bb59: ; preds = %bb49, %bb43, %bb31, %bb21
- %i60 = phi i8* [ %i44, %bb49 ], [ %i44, %bb43 ], [ %i34, %bb31 ], [ %i4, %bb21 ]
+ %i60 = phi ptr [ %i44, %bb49 ], [ %i44, %bb43 ], [ %i34, %bb31 ], [ %i4, %bb21 ]
%i61 = phi i32 [ %i45, %bb49 ], [ %i45, %bb43 ], [ %i33, %bb31 ], [ %i8, %bb21 ]
%i62 = phi i32 [ %i47, %bb49 ], [ %i47, %bb43 ], [ %i32, %bb31 ], [ %i7, %bb21 ]
%i63 = phi i32 [ %i48, %bb49 ], [ %i48, %bb43 ], [ 2, %bb31 ], [ %i6, %bb21 ]
bb31: ; preds = %bb3
%i32 = add nsw i32 %i7, -1
%i33 = add nsw i32 %i8, -1
- %i34 = getelementptr inbounds i8, i8* %i4, i64 -15
+ %i34 = getelementptr inbounds i8, ptr %i4, i64 -15
%i35 = icmp eq i8 %i5, 77
br i1 %i35, label %bb59, label %bb41
br label %bb43
bb43: ; preds = %bb41, %bb21
- %i44 = phi i8* [ %i34, %bb41 ], [ %i4, %bb21 ]
+ %i44 = phi ptr [ %i34, %bb41 ], [ %i4, %bb21 ]
%i45 = phi i32 [ %i33, %bb41 ], [ %i8, %bb21 ]
%i46 = phi i8 [ 77, %bb41 ], [ %i5, %bb21 ]
%i47 = phi i32 [ %i32, %bb41 ], [ %i7, %bb21 ]
; RUN: opt < %s -passes=deadargelim -disable-output
-define internal void @build_delaunay({ i32 }* sret({ i32 }) %agg.result) {
+define internal void @build_delaunay(ptr sret({ i32 }) %agg.result) {
ret void
}
define void @test() {
- call void @build_delaunay({ i32 }* sret({ i32 }) null)
+ call void @build_delaunay(ptr sret({ i32 }) null)
ret void
}
; CHECK: define internal void @foo(i8 signext %y) [[NUW:#[0-9]+]]
-define internal zeroext i8 @foo(i8* inreg %p, i8 signext %y, ... ) nounwind {
- store i8 %y, i8* @g
+define internal zeroext i8 @foo(ptr inreg %p, i8 signext %y, ... ) nounwind {
+ store i8 %y, ptr @g
ret i8 0
}
define i32 @bar() {
; CHECK: call void @foo(i8 signext 1) [[NUW]]
- %A = call zeroext i8(i8*, i8, ...) @foo(i8* inreg null, i8 signext 1, %struct* byval(%struct) null ) nounwind
+ %A = call zeroext i8(ptr, i8, ...) @foo(ptr inreg null, i8 signext 1, ptr byval(%struct) null ) nounwind
ret i32 0
}
; RUN: opt < %s -passes=deadargelim -S | grep byval
%struct.point = type { double, double }
-@pts = global [4 x %struct.point] [ %struct.point { double 1.000000e+00, double 2.000000e+00 }, %struct.point { double 3.000000e+00, double 4.000000e+00 }, %struct.point { double 5.000000e+00, double 6.000000e+00 }, %struct.point { double 7.000000e+00, double 8.000000e+00 } ], align 32 ; <[4 x %struct.point]*> [#uses=1]
+@pts = global [4 x %struct.point] [ %struct.point { double 1.000000e+00, double 2.000000e+00 }, %struct.point { double 3.000000e+00, double 4.000000e+00 }, %struct.point { double 5.000000e+00, double 6.000000e+00 }, %struct.point { double 7.000000e+00, double 8.000000e+00 } ], align 32 ; <ptr> [#uses=1]
define internal i32 @va1(i32 %nargs, ...) {
entry:
- %pi = alloca %struct.point ; <%struct.point*> [#uses=0]
- %args = alloca i8* ; <i8**> [#uses=2]
+ %pi = alloca %struct.point ; <ptr> [#uses=0]
+ %args = alloca ptr ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %args1 = bitcast i8** %args to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %args1 )
- %args41 = bitcast i8** %args to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %args41 )
+ call void @llvm.va_start( ptr %args )
+ call void @llvm.va_end( ptr %args )
ret i32 undef
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_end(ptr) nounwind
define i32 @main() {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr [4 x %struct.point], [4 x %struct.point]* @pts, i32 0, i32 0 ; <%struct.point*> [#uses=1]
- %tmp1 = call i32 (i32, ...) @va1(i32 1, %struct.point* byval(%struct.point) %tmp) nounwind ; <i32> [#uses=0]
+ %tmp = getelementptr [4 x %struct.point], ptr @pts, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp1 = call i32 (i32, ...) @va1(i32 1, ptr byval(%struct.point) %tmp) nounwind ; <i32> [#uses=0]
call void @exit( i32 0 ) noreturn nounwind
unreachable
}
ret {i32,i32} {i32 42, i32 4}
}
-define i32 @bar() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @bar() personality ptr @__gxx_personality_v0 {
%x = invoke {i32,i32} @foo() to label %T unwind label %T2
T:
%y = extractvalue {i32,i32} %x, 1
ret i32 %y
T2:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
unreachable
}
-define i32 @bar2() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @bar2() personality ptr @__gxx_personality_v0 {
entry:
%x = invoke {i32,i32} @foo() to label %T unwind label %T2
T:
%y = extractvalue {i32,i32} %x, 1
ret i32 %y
T2:
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
unreachable
}
; Apart from checking if debug metadata is correctly propagated, this also tests whether DW_CC_nocall
; calling convention is added when either return values or arguments are removed.
-@.str = private constant [1 x i8] zeroinitializer, align 1 ; <[1 x i8]*> [#uses=1]
+@.str = private constant [1 x i8] zeroinitializer, align 1 ; <ptr> [#uses=1]
-define i8* @vfs_addname(i8* %name, i32 %len, i32 %hash, i32 %flags) nounwind ssp !dbg !1 {
+define ptr @vfs_addname(ptr %name, i32 %len, i32 %hash, i32 %flags) nounwind ssp !dbg !1 {
;
entry:
- call void @llvm.dbg.value(metadata i8* %name, metadata !0, metadata !DIExpression()), !dbg !DILocation(scope: !1)
+ call void @llvm.dbg.value(metadata ptr %name, metadata !0, metadata !DIExpression()), !dbg !DILocation(scope: !1)
call void @llvm.dbg.value(metadata i32 %len, metadata !10, metadata !DIExpression()), !dbg !DILocation(scope: !1)
call void @llvm.dbg.value(metadata i32 %hash, metadata !11, metadata !DIExpression()), !dbg !DILocation(scope: !1)
call void @llvm.dbg.value(metadata i32 %flags, metadata !12, metadata !DIExpression()), !dbg !DILocation(scope: !1)
-; CHECK: call fastcc i8* @add_name_internal(i8* %name, i32 %hash) [[NUW:#[0-9]+]], !dbg !{{[0-9]+}}
- %0 = call fastcc i8* @add_name_internal(i8* %name, i32 %len, i32 %hash, i8 zeroext 0, i32 %flags) nounwind, !dbg !13 ; <i8*> [#uses=1]
- ret i8* %0, !dbg !13
+; CHECK: call fastcc ptr @add_name_internal(ptr %name, i32 %hash) [[NUW:#[0-9]+]], !dbg !{{[0-9]+}}
+ %0 = call fastcc ptr @add_name_internal(ptr %name, i32 %len, i32 %hash, i8 zeroext 0, i32 %flags) nounwind, !dbg !13 ; <ptr> [#uses=1]
+ ret ptr %0, !dbg !13
}
declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
-define internal fastcc i8* @add_name_internal(i8* %name, i32 %len, i32 %hash, i8 zeroext %extra, i32 %flags) noinline nounwind ssp !dbg !16 {
+define internal fastcc ptr @add_name_internal(ptr %name, i32 %len, i32 %hash, i8 zeroext %extra, i32 %flags) noinline nounwind ssp !dbg !16 {
;
entry:
- call void @llvm.dbg.value(metadata i8* %name, metadata !15, metadata !DIExpression()), !dbg !DILocation(scope: !16)
+ call void @llvm.dbg.value(metadata ptr %name, metadata !15, metadata !DIExpression()), !dbg !DILocation(scope: !16)
call void @llvm.dbg.value(metadata i32 %len, metadata !20, metadata !DIExpression()), !dbg !DILocation(scope: !16)
call void @llvm.dbg.value(metadata i32 %hash, metadata !21, metadata !DIExpression()), !dbg !DILocation(scope: !16)
call void @llvm.dbg.value(metadata i8 %extra, metadata !22, metadata !DIExpression()), !dbg !DILocation(scope: !16)
br label %bb2, !dbg !27
bb2: ; preds = %bb1, %bb
- %.0 = phi i8* [ getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), %bb ], [ %name, %bb1 ] ; <i8*> [#uses=1]
- ret i8* %.0, !dbg !27
+ %.0 = phi ptr [ @.str, %bb ], [ %name, %bb1 ] ; <ptr> [#uses=1]
+ ret ptr %.0, !dbg !27
}
declare void @llvm.dbg.value(metadata, metadata, metadata) nounwind readnone
; RUN: opt %s -passes=deadargelim -S | FileCheck %s
-@block_addr = global i8* blockaddress(@varargs_func, %l1)
-; CHECK: @block_addr = global i8* blockaddress(@varargs_func, %l1)
+@block_addr = global ptr blockaddress(@varargs_func, %l1)
+; CHECK: @block_addr = global ptr blockaddress(@varargs_func, %l1)
; This function is referenced by a "blockaddress" constant but it is
; not address-taken, so the pass should be able to remove its unused
; varargs.
-define internal i32 @varargs_func(i8* %addr, ...) {
- indirectbr i8* %addr, [ label %l1, label %l2 ]
+define internal i32 @varargs_func(ptr %addr, ...) {
+ indirectbr ptr %addr, [ label %l1, label %l2 ]
l1:
ret i32 1
l2:
ret i32 2
}
-; CHECK: define internal i32 @varargs_func(i8* %addr) {
+; CHECK: define internal i32 @varargs_func(ptr %addr) {
-define i32 @caller(i8* %addr) {
- %r = call i32 (i8*, ...) @varargs_func(i8* %addr)
+define i32 @caller(ptr %addr) {
+ %r = call i32 (ptr, ...) @varargs_func(ptr %addr)
ret i32 %r
}
-; CHECK: %r = call i32 @varargs_func(i8* %addr)
+; CHECK: %r = call i32 @varargs_func(ptr %addr)
; attributes that imply immediate undefined behavior must be deleted.
; Other attributes like nonnull, which only imply poison, can be safely kept.
-; CHECK: define i64 @bar(i64* nonnull %0, i64 %1)
-define i64 @bar(i64* nonnull dereferenceable(8) %0, i64 %1) {
+; CHECK: define i64 @bar(ptr nonnull %0, i64 %1)
+define i64 @bar(ptr nonnull dereferenceable(8) %0, i64 %1) {
entry:
%2 = add i64 %1, 8
ret i64 %2
}
-define i64 @foo(i64* %p, i64 %v) {
-; CHECK: %retval = call i64 @bar(i64* nonnull poison, i64 %v)
- %retval = call i64 @bar(i64* nonnull dereferenceable(8) %p, i64 %v)
+define i64 @foo(ptr %p, i64 %v) {
+; CHECK: %retval = call i64 @bar(ptr nonnull poison, i64 %v)
+ %retval = call i64 @bar(ptr nonnull dereferenceable(8) %p, i64 %v)
ret i64 %retval
}
; be reduced. There is scope for further optimisation here (though not visible
; in this test-case).
-; CHECK-LABEL: define internal { i8*, i32 } @inner()
+; CHECK-LABEL: define internal { ptr, i32 } @inner()
-define internal {i8*, i32} @mid() {
- %res = call {i8*, i32} @inner()
- %intval = extractvalue {i8*, i32} %res, 1
+define internal {ptr, i32} @mid() {
+ %res = call {ptr, i32} @inner()
+ %intval = extractvalue {ptr, i32} %res, 1
%tst = icmp eq i32 %intval, 42
br i1 %tst, label %true, label %true
true:
- ret {i8*, i32} %res
+ ret {ptr, i32} %res
}
-define internal {i8*, i32} @inner() {
- ret {i8*, i32} {i8* null, i32 42}
+define internal {ptr, i32} @inner() {
+ ret {ptr, i32} {ptr null, i32 42}
}
define internal i8 @outer() {
- %res = call {i8*, i32} @mid()
- %resptr = extractvalue {i8*, i32} %res, 0
+ %res = call {ptr, i32} @mid()
+ %resptr = extractvalue {ptr, i32} %res, 0
- %val = load i8, i8* %resptr
+ %val = load i8, ptr %resptr
ret i8 %val
}
; CHECK: %[[invoke:.*]] = invoke i32 @agg_ret()
; CHECK: %[[oldret:.*]] = insertvalue { i32 } poison, i32 %[[invoke]], 0
; CHECK: phi { i32 } [ %[[oldret]],
-define void @PR24906() personality i32 (i32)* poison {
+define void @PR24906() personality ptr poison {
entry:
%tmp2 = invoke { i32 } @agg_ret()
to label %bb3 unwind label %bb4
bb3:
 unreachable
bb4:
- %tmp4 = landingpad { i8*, i32 }
+ %tmp4 = landingpad { ptr, i32 }
cleanup
unreachable
}
; RUN: opt < %s -passes=deadargelim -S | not grep DEADARG
; test - an obviously dead argument
-define internal i32 @test(i32 %v, i32 %DEADARG1, i32* %p) {
- store i32 %v, i32* %p
+define internal i32 @test(i32 %v, i32 %DEADARG1, ptr %p) {
+ store i32 %v, ptr %p
ret i32 %v
}
; hardertest - an argument which is only used by a call of a function with a
; dead argument.
define internal i32 @hardertest(i32 %DEADARG2) {
- %p = alloca i32 ; <i32*> [#uses=1]
- %V = call i32 @test( i32 5, i32 %DEADARG2, i32* %p ) ; <i32> [#uses=1]
+ %p = alloca i32 ; <ptr> [#uses=1]
+ %V = call i32 @test( i32 5, i32 %DEADARG2, ptr %p ) ; <i32> [#uses=1]
ret i32 %V
}
declare void @sideeffect()
-define void @unused_byref_arg(i32* byref(i32) %dead_arg) {
+define void @unused_byref_arg(ptr byref(i32) %dead_arg) {
; CHECK-LABEL: @unused_byref_arg(
; CHECK-NEXT: tail call void @sideeffect()
; CHECK-NEXT: ret void
ret void
}
-define void @dont_replace_by_poison(i32* %ptr) {
+define void @dont_replace_by_poison(ptr %ptr) {
; CHECK-LABEL: @dont_replace_by_poison(
-; CHECK-NEXT: call void @unused_byref_arg(i32* byref(i32) poison)
+; CHECK-NEXT: call void @unused_byref_arg(ptr byref(i32) poison)
; CHECK-NEXT: ret void
;
- call void @unused_byref_arg(i32* byref(i32) %ptr)
+ call void @unused_byref_arg(ptr byref(i32) %ptr)
entry:
call void @llvm.dbg.value(metadata i32 %m, metadata !12, metadata !DIExpression()), !dbg !21
call void @llvm.dbg.value(metadata i32 %n, metadata !13, metadata !DIExpression()), !dbg !22
- call void @llvm.dbg.value(metadata %struct.Channel* null, metadata !14, metadata !DIExpression()), !dbg !23
- %call = call %struct.Channel* (...) @foo(), !dbg !24
- call void @llvm.dbg.value(metadata %struct.Channel* %call, metadata !14, metadata !DIExpression()), !dbg !23
+ call void @llvm.dbg.value(metadata ptr null, metadata !14, metadata !DIExpression()), !dbg !23
+ %call = call ptr (...) @foo(), !dbg !24
+ call void @llvm.dbg.value(metadata ptr %call, metadata !14, metadata !DIExpression()), !dbg !23
%cmp = icmp sgt i32 %m, 3, !dbg !25
br i1 %cmp, label %if.then, label %if.end, !dbg !27
if.then: ; preds = %entry
- %call1 = call zeroext i1 @f1(i1 zeroext true, %struct.Channel* %call), !dbg !28
+ %call1 = call zeroext i1 @f1(i1 zeroext true, ptr %call), !dbg !28
br label %if.end, !dbg !28
if.end: ; preds = %if.then, %entry
br i1 %cmp2, label %if.then3, label %if.end5, !dbg !31
if.then3: ; preds = %if.end
- %call4 = call zeroext i1 @f1(i1 zeroext false, %struct.Channel* %call), !dbg !32
+ %call4 = call zeroext i1 @f1(i1 zeroext false, ptr %call), !dbg !32
br label %if.end5, !dbg !32
if.end5: ; preds = %if.then3, %if.end
ret void, !dbg !33
}
-declare %struct.Channel* @foo(...) local_unnamed_addr #1
+declare ptr @foo(...) local_unnamed_addr #1
; Function Attrs: noinline nounwind uwtable
-define internal zeroext i1 @f1(i1 zeroext %is_y, %struct.Channel* %str) #4 !dbg !34 {
+define internal zeroext i1 @f1(i1 zeroext %is_y, ptr %str) #4 !dbg !34 {
entry:
%frombool = zext i1 %is_y to i8
; CHECK: call void @llvm.dbg.value(metadata i1 %is_y, metadata !39, metadata !DIExpression()), !dbg !42
call void @llvm.dbg.value(metadata i1 %is_y, metadata !39, metadata !DIExpression()), !dbg !42
-; CHECK: call void @llvm.dbg.value(metadata %struct.Channel* %str, metadata !40, metadata !DIExpression()), !dbg !43
- call void @llvm.dbg.value(metadata %struct.Channel* %str, metadata !40, metadata !DIExpression()), !dbg !43
- call void @llvm.dbg.value(metadata %struct.Channel* null, metadata !41, metadata !DIExpression()), !dbg !44
- %tobool = icmp ne %struct.Channel* %str, null, !dbg !45
+; CHECK: call void @llvm.dbg.value(metadata ptr %str, metadata !40, metadata !DIExpression()), !dbg !43
+ call void @llvm.dbg.value(metadata ptr %str, metadata !40, metadata !DIExpression()), !dbg !43
+ call void @llvm.dbg.value(metadata ptr null, metadata !41, metadata !DIExpression()), !dbg !44
+ %tobool = icmp ne ptr %str, null, !dbg !45
br i1 %tobool, label %if.end, label %if.then, !dbg !47
if.then: ; preds = %entry
br label %cleanup, !dbg !50
if.end: ; preds = %entry
- %call = call %struct.Channel* (...) @foo(), !dbg !51
- call void @llvm.dbg.value(metadata %struct.Channel* %call, metadata !41, metadata !DIExpression()), !dbg !44
+ %call = call ptr (...) @foo(), !dbg !51
+ call void @llvm.dbg.value(metadata ptr %call, metadata !41, metadata !DIExpression()), !dbg !44
%tobool1 = trunc i8 %frombool to i1, !dbg !52
br i1 %tobool1, label %if.then2, label %if.end3, !dbg !56
entry:
; CHECK: call void @llvm.dbg.value(metadata i32 poison, metadata !15, metadata !DIExpression()), !dbg !16
call void @llvm.dbg.value(metadata i32 %k, metadata !15, metadata !DIExpression()), !dbg !16
- %0 = load i32, i32* @s, align 4, !dbg !17
+ %0 = load i32, ptr @s, align 4, !dbg !17
%inc = add nsw i32 %0, 1, !dbg !17
- store i32 %inc, i32* @s, align 4, !dbg !17
- call void @llvm.dbg.value(metadata i32* @s, metadata !15, metadata !DIExpression(DW_OP_deref)), !dbg !16
+ store i32 %inc, ptr @s, align 4, !dbg !17
+ call void @llvm.dbg.value(metadata ptr @s, metadata !15, metadata !DIExpression(DW_OP_deref)), !dbg !16
ret void, !dbg !18
}
define internal i32 @has_vastart(i32 %X, ...) {
%valist = alloca i8
- call void @llvm.va_start(i8* %valist)
+ call void @llvm.va_start(ptr %valist)
ret i32 %X
}
; CHECK-LABEL: define internal i32 @has_vastart(i32 %X, ...)
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
define internal i32 @no_vastart(i32 %X, ...) {
 ret i32 %X
}
define void @h() {
entry:
%i = alloca i32, align 4
- store volatile i32 10, i32* %i, align 4
-; CHECK: %tmp = load volatile i32, i32* %i, align 4
+ store volatile i32 10, ptr %i, align 4
+; CHECK: %tmp = load volatile i32, ptr %i, align 4
; CHECK-NEXT: call void @f(i32 poison)
- %tmp = load volatile i32, i32* %i, align 4
+ %tmp = load volatile i32, ptr %i, align 4
call void @f(i32 %tmp)
ret void
}
%swift_error = type opaque
-define void @unused_swifterror_arg(%swift_error** swifterror %dead_arg) {
+define void @unused_swifterror_arg(ptr swifterror %dead_arg) {
tail call void @sideeffect() nounwind
ret void
}
; CHECK-LABEL: @dont_replace_by_poison
; CHECK-NOT: call void @unused_swifterror_arg({{.*}}poison)
define void @dont_replace_by_poison() {
- %error_ptr_ref = alloca swifterror %swift_error*
- store %swift_error* null, %swift_error** %error_ptr_ref
- call void @unused_swifterror_arg(%swift_error** %error_ptr_ref)
+ %error_ptr_ref = alloca swifterror ptr
+ store ptr null, ptr %error_ptr_ref
+ call void @unused_swifterror_arg(ptr %error_ptr_ref)
ret void
}
; The callee function's return type shouldn't be changed if the call result is
; used.
-; CHECK-LABEL: define internal i8* @callee4()
+; CHECK-LABEL: define internal ptr @callee4()
-define internal i8* @callee4(i8* %a0) {
- ret i8* @g0;
+define internal ptr @callee4(ptr %a0) {
+ ret ptr @g0;
}
declare void @llvm.objc.clang.arc.noop.use(...)
-; CHECK-LABEL: define i8* @test4(
-; CHECK: tail call i8* @callee4() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK-LABEL: define ptr @test4(
+; CHECK: tail call ptr @callee4() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
-define i8* @test4() {
- %call = tail call i8* @callee4(i8* @g0) [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
- call void (...) @llvm.objc.clang.arc.noop.use(i8* %call)
- ret i8* @g0
+define ptr @test4() {
+ %call = tail call ptr @callee4(ptr @g0) [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+ call void (...) @llvm.objc.clang.arc.noop.use(ptr %call)
+ ret ptr @g0
}
-declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
; RUN: cat %t | not grep DEAD
; RUN: cat %t | grep LIVE | count 4
-@P = external global i32 ; <i32*> [#uses=1]
+@P = external global i32 ; <ptr> [#uses=1]
; Dead arg only used by dead retval
define internal i32 @test(i32 %DEADARG) {
 ret i32 %DEADARG
}
define internal i32 @foo() {
- %DEAD = load i32, i32* @P ; <i32> [#uses=1]
+ %DEAD = load i32, ptr @P ; <i32> [#uses=1]
ret i32 %DEAD
}
; statically known, by replacing the related arguments with poison.
; This is what we check on the call that produces %res2.
-define i32 @call_indirect(i32 (i32, i32, i32)* readnone %fct_ptr, i32 %arg1, i32 %arg2, i32 %arg3) {
+define i32 @call_indirect(ptr readnone %fct_ptr, i32 %arg1, i32 %arg2, i32 %arg3) {
; CHECK-LABEL: @call_indirect(
-; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i32 (i32, i32, i32)* [[FCT_PTR:%.*]], @external_fct
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq ptr [[FCT_PTR:%.*]], @external_fct
; CHECK-NEXT: br i1 [[CMP0]], label [[CALL_EXT:%.*]], label [[CHK2:%.*]]
; CHECK: call_ext:
; CHECK-NEXT: [[RES1:%.*]] = tail call i32 @external_fct(i32 poison, i32 [[ARG2:%.*]], i32 poison)
; CHECK-NEXT: br label [[END:%.*]]
; CHECK: chk2:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 (i32, i32, i32)* [[FCT_PTR]], @internal_fct
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[FCT_PTR]], @internal_fct
; CHECK-NEXT: br i1 [[CMP1]], label [[CALL_INT:%.*]], label [[CALL_OTHER:%.*]]
; CHECK: call_int:
; CHECK-NEXT: [[RES2:%.*]] = tail call i32 @internal_fct(i32 poison, i32 [[ARG2]], i32 poison)
br label %end
chk2:
- %cmp1 = icmp eq i32 (i32, i32, i32)* %fct_ptr, @internal_fct
+ %cmp1 = icmp eq ptr %fct_ptr, @internal_fct
br i1 %cmp1, label %call_int, label %call_other
call_int:
define internal i32 @va_func(i32 %num, ...) !prof !28 !PGOFuncName !29{
; CHECK: define internal void @va_func(i32 %num) !prof ![[ENTRYCOUNT:[0-9]+]] !PGOFuncName ![[PGOFUNCNAME1:[0-9]+]] {
entry:
- %0 = load i32, i32* @s, align 4, !tbaa !31
+ %0 = load i32, ptr @s, align 4, !tbaa !31
%add = add nsw i32 %0, %num
- store i32 %add, i32* @s, align 4, !tbaa !31
+ store i32 %add, ptr @s, align 4, !tbaa !31
ret i32 0
}
define internal fastcc i32 @foo() unnamed_addr !prof !28 !PGOFuncName !30 {
; CHECK: define internal fastcc void @foo() unnamed_addr !prof ![[ENTRYCOUNT:[0-9]+]] !PGOFuncName ![[PGOFUNCNAME2:[0-9]+]] {
entry:
- %0 = load i32, i32* @s, align 4, !tbaa !31
+ %0 = load i32, ptr @s, align 4, !tbaa !31
%add = add nsw i32 %0, 8
- store i32 %add, i32* @s, align 4, !tbaa !31
+ store i32 %add, ptr @s, align 4, !tbaa !31
ret i32 0
}
; RUN: opt -S -passes=deadargelim < %s | FileCheck %s
target triple = "x86_64-pc-windows-msvc"
-define internal void @callee(i8*) {
+define internal void @callee(ptr) {
entry:
call void @thunk()
ret void
}
-define void @test1() personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1() personality ptr @__CxxFrameHandler3 {
entry:
invoke void @thunk()
to label %good1 unwind label %bad1
bad1: ; preds = %entry-block
%pad1 = cleanuppad within none []
- call void @callee(i8* null) [ "funclet"(token %pad1) ]
+ call void @callee(ptr null) [ "funclet"(token %pad1) ]
cleanupret from %pad1 unwind to caller
}
; CHECK-LABEL: define void @test1(
; RUN: opt < %s -passes=deadargelim -S | FileCheck %s
declare token @llvm.call.preallocated.setup(i32)
-declare i8* @llvm.call.preallocated.arg(token, i32)
+declare ptr @llvm.call.preallocated.arg(token, i32)
%Ty = type <{ i32, i32 }>
; We can't remove 'this' here, as that would put argmem in ecx instead of
; memory.
-define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca(i32) %argmem) {
+define internal x86_thiscallcc i32 @unused_this(ptr %this, ptr inalloca(i32) %argmem) {
;
;
- %v = load i32, i32* %argmem
+ %v = load i32, ptr %argmem
ret i32 %v
}
-; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca(i32) %argmem)
+; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this(ptr %this, ptr inalloca(i32) %argmem)
define i32 @caller2() {
;
;
%t = alloca i32
%m = alloca inalloca i32
- store i32 42, i32* %m
- %v = call x86_thiscallcc i32 @unused_this(i32* %t, i32* inalloca(i32) %m)
+ store i32 42, ptr %m
+ %v = call x86_thiscallcc i32 @unused_this(ptr %t, ptr inalloca(i32) %m)
ret i32 %v
}
; We can't remove 'this' here, as that would put argmem in ecx instead of
; memory.
-define internal x86_thiscallcc i32 @unused_this_preallocated(i32* %this, i32* preallocated(i32) %argmem) {
+define internal x86_thiscallcc i32 @unused_this_preallocated(ptr %this, ptr preallocated(i32) %argmem) {
;
;
- %v = load i32, i32* %argmem
+ %v = load i32, ptr %argmem
ret i32 %v
}
-; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this_preallocated(i32* %this, i32* preallocated(i32) %argmem)
+; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this_preallocated(ptr %this, ptr preallocated(i32) %argmem)
define i32 @caller3() {
;
;
%t = alloca i32
%c = call token @llvm.call.preallocated.setup(i32 1)
- %M = call i8* @llvm.call.preallocated.arg(token %c, i32 0) preallocated(i32)
- %m = bitcast i8* %M to i32*
- store i32 42, i32* %m
- %v = call x86_thiscallcc i32 @unused_this_preallocated(i32* %t, i32* preallocated(i32) %m) ["preallocated"(token %c)]
+ %M = call ptr @llvm.call.preallocated.arg(token %c, i32 0) preallocated(i32)
+ store i32 42, ptr %M
+ %v = call x86_thiscallcc i32 @unused_this_preallocated(ptr %t, ptr preallocated(i32) %M) ["preallocated"(token %c)]
ret i32 %v
}
; rdar://11546243
%struct.A = type { i8 }
-define available_externally void @_Z17externallyDefinedP1A(%struct.A* %a) {
+define available_externally void @_Z17externallyDefinedP1A(ptr %a) {
entry:
call void @_Z3foov()
 ret void
}
declare void @_Z3foov()
-define void @_Z4testP1A(%struct.A* %a) {
+define void @_Z4testP1A(ptr %a) {
; CHECK: @_Z4testP1A
-; CHECK: @_Z17externallyDefinedP1A(%struct.A* %a)
+; CHECK: @_Z17externallyDefinedP1A(ptr %a)
entry:
- call void @_Z17externallyDefinedP1A(%struct.A* %a)
+ call void @_Z17externallyDefinedP1A(ptr %a)
ret void
}
declare dso_local i32 @test(i64, i64)
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
start:
%x = call { i64, i64 } @g(i64 13, i64 42)
%x.0 = extractvalue { i64, i64 } %x, 0
; Validate that the argument and return value are both dead
; CHECK-LABEL: define internal void @test1()
-define internal %Ty* @test1(%Ty* %this) {
- ret %Ty* %this
+define internal ptr @test1(ptr %this) {
+ ret ptr %this
}
; do not keep alive the return value of a function with a dead 'returned' argument
; CHECK-LABEL: define internal void @test2()
-define internal %Ty* @test2(%Ty* returned %this) {
- ret %Ty* %this
+define internal ptr @test2(ptr returned %this) {
+ ret ptr %this
}
; dummy to keep 'this' alive
-@dummy = global %Ty* null
+@dummy = global ptr null
; Validate that return value is dead
-; CHECK-LABEL: define internal void @test3(%Ty* %this)
+; CHECK-LABEL: define internal void @test3(ptr %this)
-define internal %Ty* @test3(%Ty* %this) {
- store volatile %Ty* %this, %Ty** @dummy
- ret %Ty* %this
+define internal ptr @test3(ptr %this) {
+ store volatile ptr %this, ptr @dummy
+ ret ptr %this
}
; keep alive return value of a function if the 'returned' argument is live
-; CHECK-LABEL: define internal %Ty* @test4(%Ty* returned %this)
+; CHECK-LABEL: define internal ptr @test4(ptr returned %this)
-define internal %Ty* @test4(%Ty* returned %this) {
- store volatile %Ty* %this, %Ty** @dummy
- ret %Ty* %this
+define internal ptr @test4(ptr returned %this) {
+ store volatile ptr %this, ptr @dummy
+ ret ptr %this
}
; don't do this if 'returned' is on the call site...
-; CHECK-LABEL: define internal void @test5(%Ty* %this)
+; CHECK-LABEL: define internal void @test5(ptr %this)
-define internal %Ty* @test5(%Ty* %this) {
- store volatile %Ty* %this, %Ty** @dummy
- ret %Ty* %this
+define internal ptr @test5(ptr %this) {
+ store volatile ptr %this, ptr @dummy
+ ret ptr %this
}
; Drop all these attributes
; CHECK-LABEL: define internal void @test6
-define internal align 8 dereferenceable_or_null(2) noundef noalias i8* @test6() {
- ret i8* null
+define internal align 8 dereferenceable_or_null(2) noundef noalias ptr @test6() {
+ ret ptr null
}
-define %Ty* @caller(%Ty* %this) {
- %1 = call %Ty* @test1(%Ty* %this)
- %2 = call %Ty* @test2(%Ty* %this)
- %3 = call %Ty* @test3(%Ty* %this)
- %4 = call %Ty* @test4(%Ty* %this)
+define ptr @caller(ptr %this) {
+ %1 = call ptr @test1(ptr %this)
+ %2 = call ptr @test2(ptr %this)
+ %3 = call ptr @test3(ptr %this)
+ %4 = call ptr @test4(ptr %this)
; ...instead, drop 'returned' form the call site
-; CHECK: call void @test5(%Ty* %this)
- %5 = call %Ty* @test5(%Ty* returned %this)
- %6 = call i8* @test6()
- ret %Ty* %this
+; CHECK: call void @test5(ptr %this)
+ %5 = call ptr @test5(ptr returned %this)
+ %6 = call ptr @test6()
+ ret ptr %this
}
; RUN: opt < %s -passes=deadargelim -S | FileCheck %s
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
define internal i32 @va_func(i32 %a, i32 %b, ...) {
%valist = alloca i8
- call void @llvm.va_start(i8* %valist)
+ call void @llvm.va_start(ptr %valist)
ret i32 %b
}
; it.
define i32 @call_va(i32 %in) {
%stacked = alloca i32
- store i32 42, i32* %stacked
- %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] poison, i32* byval(i32) %stacked)
+ store i32 42, ptr %stacked
+ %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] poison, ptr byval(i32) %stacked)
ret i32 %res
-; CHECK: call i32 (i32, i32, ...) @va_func(i32 poison, i32 %in, [6 x i32] poison, i32* byval(i32) %stacked)
+; CHECK: call i32 (i32, i32, ...) @va_func(i32 poison, i32 %in, [6 x i32] poison, ptr byval(i32) %stacked)
}
define internal i32 @va_deadret_func(i32 %a, i32 %b, ...) {
%valist = alloca i8
- call void @llvm.va_start(i8* %valist)
+ call void @llvm.va_start(ptr %valist)
ret i32 %a
}
define void @call_deadret(i32 %in) {
%stacked = alloca i32
- store i32 42, i32* %stacked
- call i32 (i32, i32, ...) @va_deadret_func(i32 poison, i32 %in, [6 x i32] poison, i32* byval(i32) %stacked)
+ store i32 42, ptr %stacked
+ call i32 (i32, i32, ...) @va_deadret_func(i32 poison, i32 %in, [6 x i32] poison, ptr byval(i32) %stacked)
ret void
-; CHECK: call void (i32, i32, ...) @va_deadret_func(i32 poison, i32 poison, [6 x i32] poison, i32* byval(i32) %stacked)
+; CHECK: call void (i32, i32, ...) @va_deadret_func(i32 poison, i32 poison, [6 x i32] poison, ptr byval(i32) %stacked)
}
define i32 @f(i32 %x) #0 !dbg !7 {
entry:
%x.addr = alloca i32, align 4
- store i32 %x, i32* %x.addr, align 4
+ store i32 %x, ptr %x.addr, align 4
ret i32 42, !dbg !12
; CHECK-LABEL: define i32 @f(i32 %x)
-; CHECK: call i8* @llvm.returnaddress(i32 0), !dbg ![[ENTRYLOC:[0-9]+]]
+; CHECK: call ptr @llvm.returnaddress(i32 0), !dbg ![[ENTRYLOC:[0-9]+]]
; CHECK: call void @__cyg_profile_func_enter{{.*}}, !dbg ![[ENTRYLOC]]
-; CHECK: call i8* @llvm.returnaddress(i32 0), !dbg ![[EXITLOC:[0-9]+]]
+; CHECK: call ptr @llvm.returnaddress(i32 0), !dbg ![[EXITLOC:[0-9]+]]
; CHECK: call void @__cyg_profile_func_exit{{.*}}, !dbg ![[EXITLOC]]
; CHECK: ret i32 42, !dbg ![[EXITLOC]]
}
], !dbg !11
sw0: ; preds = %entry
- store i32 1, i32* @G, align 4, !dbg !12
+ store i32 1, ptr @G, align 4, !dbg !12
br label %exit, !dbg !13
sw1: ; preds = %entry
- store i32 1, i32* @G, align 4, !dbg !14
+ store i32 1, ptr @G, align 4, !dbg !14
br label %exit, !dbg !15
exit1: ; preds = %entry
- store i32 1, i32* @G, align 4, !dbg !16
+ store i32 1, ptr @G, align 4, !dbg !16
ret void, !dbg !17
exit: ; preds = %sw1, %sw0
; Check that the inlined loads are hoisted.
; CHECK-LABEL: define i32 @fun(
; CHECK-LABEL: entry:
-; CHECK: load i32, i32* @A
+; CHECK: load i32, ptr @A
; CHECK: if.then:
@A = external global i32
@E = external global i32
define i32 @loadA() {
- %a = load i32, i32* @A
+ %a = load i32, ptr @A
ret i32 %a
}
br i1 %c, label %if.then, label %if.else
if.then:
- store i32 1, i32* @B
+ store i32 1, ptr @B
%call1 = call i32 @loadA()
- store i32 2, i32* @C
+ store i32 2, ptr @C
br label %if.endif
if.else:
- store i32 2, i32* @D
+ store i32 2, ptr @D
%call2 = call i32 @loadA()
- store i32 1, i32* @E
+ store i32 1, ptr @E
br label %if.endif
if.endif:
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @test1(i1 %b, i32* %x) {
+define void @test1(i1 %b, ptr %x) {
entry:
br i1 %b, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 2, i32* %x, align 4, !tbaa !1
+ store i32 2, ptr %x, align 4, !tbaa !1
br label %if.end
if.else: ; preds = %entry
- store i32 2, i32* %x, align 4, !tbaa !5
+ store i32 2, ptr %x, align 4, !tbaa !5
br label %if.end
if.end: ; preds = %if.else, %if.then
ret void
}
; CHECK-LABEL: define void @test1(
-; CHECK: store i32 2, i32* %x, align 4
+; CHECK: store i32 2, ptr %x, align 4
; CHECK-NEXT: br i1 %b
-define void @test2(i1 %b, i32* %x) {
+define void @test2(i1 %b, ptr %x) {
entry:
br i1 %b, label %if.then, label %if.else
if.then: ; preds = %entry
- %gep1 = getelementptr inbounds i32, i32* %x, i64 1
- store i32 2, i32* %gep1, align 4, !tbaa !1
+ %gep1 = getelementptr inbounds i32, ptr %x, i64 1
+ store i32 2, ptr %gep1, align 4, !tbaa !1
br label %if.end
if.else: ; preds = %entry
- %gep2 = getelementptr inbounds i32, i32* %x, i64 1
- store i32 2, i32* %gep2, align 4, !tbaa !5
+ %gep2 = getelementptr inbounds i32, ptr %x, i64 1
+ store i32 2, ptr %gep2, align 4, !tbaa !5
br label %if.end
if.end: ; preds = %if.else, %if.then
ret void
}
; CHECK-LABEL: define void @test2(
-; CHECK: %[[gep:.*]] = getelementptr inbounds i32, i32* %x, i64 1
-; CHECK: store i32 2, i32* %[[gep]], align 4
+; CHECK: %[[gep:.*]] = getelementptr inbounds i32, ptr %x, i64 1
+; CHECK: store i32 2, ptr %[[gep]], align 4
; CHECK-NEXT: br i1 %b
-define void @test3(i1 %b, i32* %x) {
+define void @test3(i1 %b, ptr %x) {
entry:
br i1 %b, label %if.then, label %if.else
if.then: ; preds = %entry
- %gep1 = getelementptr inbounds i32, i32* %x, i64 1
- store i32 2, i32* %gep1, align 4, !tbaa !1
+ %gep1 = getelementptr inbounds i32, ptr %x, i64 1
+ store i32 2, ptr %gep1, align 4, !tbaa !1
br label %if.end
if.else: ; preds = %entry
- %gep2 = getelementptr i32, i32* %x, i64 1
- store i32 2, i32* %gep2, align 4, !tbaa !5
+ %gep2 = getelementptr i32, ptr %x, i64 1
+ store i32 2, ptr %gep2, align 4, !tbaa !5
br label %if.end
if.end: ; preds = %if.else, %if.then
ret void
}
; CHECK-LABEL: define void @test3(
-; CHECK: %[[gep:.*]] = getelementptr i32, i32* %x, i64 1
-; CHECK: store i32 2, i32* %[[gep]], align 4
+; CHECK: %[[gep:.*]] = getelementptr i32, ptr %x, i64 1
+; CHECK: store i32 2, ptr %[[gep]], align 4
; CHECK-NEXT: br i1 %b
!1 = !{!2, !2, i64 0}
!5 = !{!6, !6, i64 0}
!6 = !{!"_ZTS1e", !3, i64 0}
-define i32 @test4(i1 %b, i32* %y) {
+define i32 @test4(i1 %b, ptr %y) {
entry:
br i1 %b, label %if.then, label %if.end
if.then: ; preds = %entry
- %0 = load i32, i32* %y, align 4, !range !7
+ %0 = load i32, ptr %y, align 4, !range !7
br label %return
if.end: ; preds = %entry
- %1 = load i32, i32* %y, align 4, !range !8
+ %1 = load i32, ptr %y, align 4, !range !8
br label %return
return: ; preds = %if.end, %if.then
ret i32 %retval.0
}
; CHECK-LABEL: define i32 @test4(
-; CHECK: %[[load:.*]] = load i32, i32* %y, align 4, !range ![[range_md:.*]]
+; CHECK: %[[load:.*]] = load i32, ptr %y, align 4, !range ![[range_md:.*]]
; CHECK: %[[phi:.*]] = phi i32 [ %[[load]], %{{.*}} ], [ %[[load]], %{{.*}} ]
; CHECK: ret i32 %[[phi]]
-define i32* @test5(i1 %b, i32** %y) {
+define ptr @test5(i1 %b, ptr %y) {
entry:
br i1 %b, label %if.then, label %if.end
if.then: ; preds = %entry
- %0 = load i32*, i32** %y, align 4, !nonnull !9
+ %0 = load ptr, ptr %y, align 4, !nonnull !9
br label %return
if.end: ; preds = %entry
- %1 = load i32*, i32** %y, align 4
+ %1 = load ptr, ptr %y, align 4
br label %return
return: ; preds = %if.end, %if.then
- %retval.0 = phi i32* [ %0, %if.then ], [ %1, %if.end ]
- ret i32* %retval.0
+ %retval.0 = phi ptr [ %0, %if.then ], [ %1, %if.end ]
+ ret ptr %retval.0
}
-; CHECK-LABEL: define i32* @test5(
-; CHECK: %[[load:.*]] = load i32*, i32** %y, align 4
+; CHECK-LABEL: define ptr @test5(
+; CHECK: %[[load:.*]] = load ptr, ptr %y, align 4
; CHECK-NOT: !nonnull
-; CHECK: %[[phi:.*]] = phi i32* [ %[[load]], %{{.*}} ], [ %[[load]], %{{.*}} ]
-; CHECK: ret i32* %[[phi]]
+; CHECK: %[[phi:.*]] = phi ptr [ %[[load]], %{{.*}} ], [ %[[load]], %{{.*}} ]
+; CHECK: ret ptr %[[phi]]
!7 = !{i32 0, i32 2}
!8 = !{i32 3, i32 4}
]
sw0:
- store i32 1, i32* @G
+ store i32 1, ptr @G
br label %exit
sw1:
- store i32 1, i32* @G
+ store i32 1, ptr @G
br label %exit
exit1:
- store i32 1, i32* @G
+ store i32 1, ptr @G
ret void
exit:
ret void
br i1 undef, label %bb4, label %bb9
bb4: ; preds = %bb3
- %tmp = load i32, i32* @optind, align 4
+ %tmp = load i32, ptr @optind, align 4
br i1 undef, label %bb5, label %bb7
bb5: ; preds = %bb4
%tmp6 = add nsw i32 %tmp, 1
- store i32 %tmp6, i32* @optind, align 4
+ store i32 %tmp6, ptr @optind, align 4
br label %bb12
bb7: ; preds = %bb4
%tmp8 = add nsw i32 %tmp, 1
- store i32 %tmp8, i32* @optind, align 4
+ store i32 %tmp8, ptr @optind, align 4
br label %bb13
bb9: ; preds = %bb3
- %tmp10 = load i32, i32* @optind, align 4
+ %tmp10 = load i32, ptr @optind, align 4
%tmp11 = add nsw i32 %tmp10, 1
- store i32 %tmp11, i32* @optind, align 4
+ store i32 %tmp11, ptr @optind, align 4
br label %bb12
bb12: ; preds = %bb9, %bb5
; CHECK-NOT: store float
define float @hoistStoresUpdateMSSA(float %d) {
entry:
- store float 0.000000e+00, float* @GlobalVar
+ store float 0.000000e+00, ptr @GlobalVar
%cmp = fcmp oge float %d, 0.000000e+00
br i1 %cmp, label %if.then, label %if.end
if.then:
- store float 0.000000e+00, float* @GlobalVar
+ store float 0.000000e+00, ptr @GlobalVar
br label %if.end
if.end:
- %tmp = load float, float* @GlobalVar, align 4
+ %tmp = load float, ptr @GlobalVar, align 4
ret float %tmp
}
; CHECK-NOT: load
; CHECK-NOT: fmul
; CHECK-NOT: fsub
-define float @dominatorHoisting(float %d, float* %min, float* %max, float* %a) {
+define float @dominatorHoisting(float %d, ptr %min, ptr %max, ptr %a) {
entry:
%div = fdiv float 1.000000e+00, %d
- %0 = load float, float* %min, align 4
- %1 = load float, float* %a, align 4
+ %0 = load float, ptr %min, align 4
+ %1 = load float, ptr %a, align 4
%sub = fsub float %0, %1
%mul = fmul float %sub, %div
- %2 = load float, float* %max, align 4
+ %2 = load float, ptr %max, align 4
%sub1 = fsub float %2, %1
%mul2 = fmul float %sub1, %div
%cmp = fcmp oge float %div, 0.000000e+00
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %3 = load float, float* %max, align 4
- %4 = load float, float* %a, align 4
+ %3 = load float, ptr %max, align 4
+ %4 = load float, ptr %a, align 4
%sub3 = fsub float %3, %4
%mul4 = fmul float %sub3, %div
- %5 = load float, float* %min, align 4
+ %5 = load float, ptr %min, align 4
%sub5 = fsub float %5, %4
%mul6 = fmul float %sub5, %div
br label %if.end
; CHECK-NOT: load
; CHECK-NOT: fmul
; CHECK-NOT: fsub
-define float @domHoisting(float %d, float* %min, float* %max, float* %a) {
+define float @domHoisting(float %d, ptr %min, ptr %max, ptr %a) {
entry:
%div = fdiv float 1.000000e+00, %d
- %0 = load float, float* %min, align 4
- %1 = load float, float* %a, align 4
+ %0 = load float, ptr %min, align 4
+ %1 = load float, ptr %a, align 4
%sub = fsub float %0, %1
%mul = fmul float %sub, %div
- %2 = load float, float* %max, align 4
+ %2 = load float, ptr %max, align 4
%sub1 = fsub float %2, %1
%mul2 = fmul float %sub1, %div
%cmp = fcmp oge float %div, 0.000000e+00
br i1 %cmp, label %if.then, label %if.else
if.then:
- %3 = load float, float* %max, align 4
- %4 = load float, float* %a, align 4
+ %3 = load float, ptr %max, align 4
+ %4 = load float, ptr %a, align 4
%sub3 = fsub float %3, %4
%mul4 = fmul float %sub3, %div
- %5 = load float, float* %min, align 4
+ %5 = load float, ptr %min, align 4
%sub5 = fsub float %5, %4
%mul6 = fmul float %sub5, %div
br label %if.end
if.else:
- %6 = load float, float* %max, align 4
- %7 = load float, float* %a, align 4
+ %6 = load float, ptr %max, align 4
+ %7 = load float, ptr %a, align 4
%sub9 = fsub float %6, %7
%mul10 = fmul float %sub9, %div
- %8 = load float, float* %min, align 4
+ %8 = load float, ptr %min, align 4
%sub12 = fsub float %8, %7
%mul13 = fmul float %sub12, %div
br label %if.end
; CHECK: or i32
; CHECK-NOT: or i32
; gvn-hoist test: a varint-style encoder lowered with everything through
; allocas (%p.addr, %v.addr). Each branch computes `or i32 %v, 128`; the
; `CHECK: or i32` / `CHECK-NOT: or i32` pair above asserts the pass leaves
; exactly one `or` after hoisting the common computation.
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
-define i8* @encode(i8* %p, i32 %v) {
+define ptr @encode(ptr %p, i32 %v) {
entry:
- %p.addr = alloca i8*, align 8
+ %p.addr = alloca ptr, align 8
%v.addr = alloca i32, align 4
- store i8* %p, i8** %p.addr, align 8
- store i32 %v, i32* %v.addr, align 4
- %0 = load i32, i32* %v.addr, align 4
+ store ptr %p, ptr %p.addr, align 8
+ store i32 %v, ptr %v.addr, align 4
+ %0 = load i32, ptr %v.addr, align 4
%cmp = icmp ult i32 %0, 23
br i1 %cmp, label %if.then, label %if.else
; v < 23: emit one byte (v | 128) and bump the output pointer.
if.then: ; preds = %entry
- %1 = load i32, i32* %v.addr, align 4
+ %1 = load i32, ptr %v.addr, align 4
%or = or i32 %1, 128
%conv = trunc i32 %or to i8
- %2 = load i8*, i8** %p.addr, align 8
- %incdec.ptr = getelementptr inbounds i8, i8* %2, i32 1
- store i8* %incdec.ptr, i8** %p.addr, align 8
- store i8 %conv, i8* %2, align 1
+ %2 = load ptr, ptr %p.addr, align 8
+ %incdec.ptr = getelementptr inbounds i8, ptr %2, i32 1
+ store ptr %incdec.ptr, ptr %p.addr, align 8
+ store i8 %conv, ptr %2, align 1
br label %if.end15
if.else: ; preds = %entry
- %3 = load i32, i32* %v.addr, align 4
+ %3 = load i32, ptr %v.addr, align 4
%cmp1 = icmp ult i32 %3, 42
br i1 %cmp1, label %if.then3, label %if.else9
; 23 <= v < 42: emit (v | 128) then the low byte of v.
if.then3: ; preds = %if.else
- %4 = load i32, i32* %v.addr, align 4
+ %4 = load i32, ptr %v.addr, align 4
%or4 = or i32 %4, 128
%conv5 = trunc i32 %or4 to i8
- %5 = load i8*, i8** %p.addr, align 8
- %incdec.ptr6 = getelementptr inbounds i8, i8* %5, i32 1
- store i8* %incdec.ptr6, i8** %p.addr, align 8
- store i8 %conv5, i8* %5, align 1
- %6 = load i32, i32* %v.addr, align 4
+ %5 = load ptr, ptr %p.addr, align 8
+ %incdec.ptr6 = getelementptr inbounds i8, ptr %5, i32 1
+ store ptr %incdec.ptr6, ptr %p.addr, align 8
+ store i8 %conv5, ptr %5, align 1
+ %6 = load i32, ptr %v.addr, align 4
%conv7 = trunc i32 %6 to i8
- %7 = load i8*, i8** %p.addr, align 8
- %incdec.ptr8 = getelementptr inbounds i8, i8* %7, i32 1
- store i8* %incdec.ptr8, i8** %p.addr, align 8
- store i8 %conv7, i8* %7, align 1
+ %7 = load ptr, ptr %p.addr, align 8
+ %incdec.ptr8 = getelementptr inbounds i8, ptr %7, i32 1
+ store ptr %incdec.ptr8, ptr %p.addr, align 8
+ store i8 %conv7, ptr %7, align 1
br label %if.end
; v >= 42: emit (v | 128) then (v >> 7).
if.else9: ; preds = %if.else
- %8 = load i32, i32* %v.addr, align 4
+ %8 = load i32, ptr %v.addr, align 4
%or10 = or i32 %8, 128
%conv11 = trunc i32 %or10 to i8
- %9 = load i8*, i8** %p.addr, align 8
- %incdec.ptr12 = getelementptr inbounds i8, i8* %9, i32 1
- store i8* %incdec.ptr12, i8** %p.addr, align 8
- store i8 %conv11, i8* %9, align 1
- %10 = load i32, i32* %v.addr, align 4
+ %9 = load ptr, ptr %p.addr, align 8
+ %incdec.ptr12 = getelementptr inbounds i8, ptr %9, i32 1
+ store ptr %incdec.ptr12, ptr %p.addr, align 8
+ store i8 %conv11, ptr %9, align 1
+ %10 = load i32, ptr %v.addr, align 4
%shr = lshr i32 %10, 7
%conv13 = trunc i32 %shr to i8
- %11 = load i8*, i8** %p.addr, align 8
- %incdec.ptr14 = getelementptr inbounds i8, i8* %11, i32 1
- store i8* %incdec.ptr14, i8** %p.addr, align 8
- store i8 %conv13, i8* %11, align 1
+ %11 = load ptr, ptr %p.addr, align 8
+ %incdec.ptr14 = getelementptr inbounds i8, ptr %11, i32 1
+ store ptr %incdec.ptr14, ptr %p.addr, align 8
+ store i8 %conv13, ptr %11, align 1
br label %if.end
if.end: ; preds = %if.else9, %if.then3
br label %if.end15
if.end15: ; preds = %if.end, %if.then
; Return the advanced output cursor.
- %12 = load i8*, i8** %p.addr, align 8
- ret i8* %12
+ %12 = load ptr, ptr %p.addr, align 8
+ ret ptr %12
}
; CHECK: sub i64
; CHECK-NOT: sub i64
-define i64 @fun(i8* %out, i8* %end) {
- %1 = icmp ult i8* %out, %end
+define i64 @fun(ptr %out, ptr %end) {
+ %1 = icmp ult ptr %out, %end
br i1 %1, label %2, label %6
; <label>:2 ; preds = %0
- %3 = ptrtoint i8* %end to i64
- %4 = ptrtoint i8* %out to i64
+ %3 = ptrtoint ptr %end to i64
+ %4 = ptrtoint ptr %out to i64
%5 = sub i64 %3, %4
br label %10
; <label>:6 ; preds = %0
- %7 = ptrtoint i8* %out to i64
- %8 = ptrtoint i8* %end to i64
+ %7 = ptrtoint ptr %out to i64
+ %8 = ptrtoint ptr %end to i64
%9 = sub i64 %8, %7
br label %10
; Reduced crash/hoist test over global @a (an i16 field of %rec894.0.1.2.3.12).
; Two identical store/load pairs of `i16 undef` — candidates for merging.
; The + lines show that under opaque pointers the zero-offset constant GEP
; folds away, leaving plain `ptr @a` operands.
define void @test_it() {
bb2:
- store i16 undef, i16* getelementptr inbounds (%rec894.0.1.2.3.12, %rec894.0.1.2.3.12* @a, i16 0, i32 0), align 1
- %_tmp61 = load i16, i16* getelementptr inbounds (%rec894.0.1.2.3.12, %rec894.0.1.2.3.12* @a, i16 0, i32 0), align 1
- store i16 undef, i16* getelementptr inbounds (%rec894.0.1.2.3.12, %rec894.0.1.2.3.12* @a, i16 0, i32 0), align 1
- %_tmp92 = load i16, i16* getelementptr inbounds (%rec894.0.1.2.3.12, %rec894.0.1.2.3.12* @a, i16 0, i32 0), align 1
+ store i16 undef, ptr @a, align 1
+ %_tmp61 = load i16, ptr @a, align 1
+ store i16 undef, ptr @a, align 1
+ %_tmp92 = load i16, ptr @a, align 1
ret void
}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-@input = local_unnamed_addr global i32* null, align 8
+@input = local_unnamed_addr global ptr null, align 8
; Check that the load instruction is **not** hoisted
; CHECK-LABEL: @_Z3fooPii
; CHECK: load
; CHECK-LABEL: @main
-define i32 @_Z3fooPii(i32* %p, i32 %x) local_unnamed_addr {
+define i32 @_Z3fooPii(ptr %p, i32 %x) local_unnamed_addr {
entry:
- %cmp.not = icmp eq i32* %p, null
+ %cmp.not = icmp eq ptr %p, null
br i1 %cmp.not, label %if.end3, label %if.then
if.then: ; preds = %entry
- %0 = load i32, i32* %p, align 4, !tbaa !3
+ %0 = load i32, ptr %p, align 4, !tbaa !3
%add = add nsw i32 %0, %x
%cmp1 = icmp eq i32 %add, 4
br i1 %cmp1, label %if2, label %if.end3
if2: ; preds = %if.end3, %if.then
%x.addr.1 = phi i32 [ 4, %if.then ], [ %x.addr.0, %if.end3 ]
%y.0 = phi i32 [ 2, %if.then ], [ %add4, %if.end3 ]
- %1 = load i32, i32* %p, align 4, !tbaa !3
+ %1 = load i32, ptr %p, align 4, !tbaa !3
%add7 = add nsw i32 %x.addr.1, %1
%cmp8 = icmp eq i32 %add7, 5
br i1 %cmp8, label %end, label %if.end11
; Driver for the @_Z3fooPii test above: loads the global pointer @input and
; forwards it with x = 0. Covered by the `CHECK-LABEL: @main` directive earlier.
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
define i32 @main() local_unnamed_addr {
entry:
- %0 = load i32*, i32** @input, align 8, !tbaa !7
- %call = call i32 @_Z3fooPii(i32* %0, i32 0)
+ %0 = load ptr, ptr @input, align 8, !tbaa !7
+ %call = call i32 @_Z3fooPii(ptr %0, i32 0)
ret i32 %call
}
; in non-trivial cases.
; CHECK: if.else218:
-; CHECK-NEXT: %0 = getelementptr inbounds %s, %s* undef, i32 0, i32 0
-; CHECK-NEXT: %1 = load i32, i32* %0, align 4
+; CHECK-NEXT: %0 = load i32, ptr undef, align 4
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-%s = type { i32, %s**, [3 x i8], i8 }
+%s = type { i32, ptr, [3 x i8], i8 }
define void @test() {
entry:
br i1 undef, label %if.then226, label %if.else326
if.then226: ; preds = %if.else218
- %size227 = getelementptr inbounds %s, %s* undef, i32 0, i32 0
- %0 = load i32, i32* %size227, align 4
+ %0 = load i32, ptr undef, align 4
unreachable
if.else326: ; preds = %if.else218
- %size330 = getelementptr inbounds %s, %s* undef, i32 0, i32 0
- %1 = load i32, i32* %size330, align 4
+ %1 = load i32, ptr undef, align 4
unreachable
cleanup: ; preds = %while.end, %cond.end118
; Function Attrs:
define i32 @main() {
entry:
- %0 = load volatile i32, i32* @g_x_s, align 4
- %1 = load volatile i32, i32* @g_z_s, align 4
- %2 = load volatile i32, i32* @g_x_u, align 4
- %3 = load volatile i32, i32* @g_z_u, align 4
- %4 = load volatile i32, i32* @g_m, align 4
+ %0 = load volatile i32, ptr @g_x_s, align 4
+ %1 = load volatile i32, ptr @g_z_s, align 4
+ %2 = load volatile i32, ptr @g_x_u, align 4
+ %3 = load volatile i32, ptr @g_z_u, align 4
+ %4 = load volatile i32, ptr @g_m, align 4
%call = call i64 @func() #4
%conv = sext i32 %1 to i64
%cmp = icmp ne i64 %call, %conv
]
sw0:
- store i32 1, i32* @G
+ store i32 1, ptr @G
br label %exit
sw1:
- store i32 1, i32* @G
+ store i32 1, ptr @G
br label %exit
exit:
- call void @longjmp(%struct.__jmp_buf_tag* @test_exit_buf, i32 1) #0
+ call void @longjmp(ptr @test_exit_buf, i32 1) #0
unreachable
}
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) #0
+declare void @longjmp(ptr, i32) #0
attributes #0 = { noreturn nounwind }
br i1 undef, label %sw0, label %sw1
sw0:
- store i32 1, i32* @G
+ store i32 1, ptr @G
unreachable
sw1:
- store i32 1, i32* @G
+ store i32 1, ptr @G
ret void
}
target triple = "x86_64-pc_linux"
; Function Attrs: nounwind uwtable
; gvn-hoist test: both %if.then and %if.else load %in[%indvars.iv]; the inline
; CHECK directives expect that load hoisted into %for.body — even in %if.else,
; where it sits after a store to the noalias %out (hence "in spite of store").
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
-define float* @foo(i32* noalias nocapture readonly %in, float* noalias %out, i32 %size, i32* nocapture readonly %trigger) {
+define ptr @foo(ptr noalias nocapture readonly %in, ptr noalias %out, i32 %size, ptr nocapture readonly %trigger) {
entry:
%cmp11 = icmp eq i32 %size, 0
br i1 %cmp11, label %for.end, label %for.body.lr.ph
; CHECK-LABEL: for.body
; CHECK: load
-; CHECK: %2 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
-; CHECK: %3 = load i32, i32* %2, align 4
+; CHECK: %2 = getelementptr inbounds i32, ptr %in, i64 %indvars.iv
+; CHECK: %3 = load i32, ptr %2, align 4
for.body: ; preds = %for.body.lr.ph, %for.inc
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.inc ]
- %arrayidx = getelementptr inbounds i32, i32* %trigger, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %trigger, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %if.then, label %if.else
; CHECK-LABEL: if.then
if.then: ; preds = %for.body
; This load should be hoisted
- %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %in, i64 %indvars.iv
+ %2 = load i32, ptr %arrayidx3, align 4
%conv = sitofp i32 %2 to float
%add = fadd float %conv, 5.000000e-01
- %arrayidx5 = getelementptr inbounds float, float* %out, i64 %indvars.iv
- store float %add, float* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds float, ptr %out, i64 %indvars.iv
+ store float %add, ptr %arrayidx5, align 4
br label %for.inc
if.else: ; preds = %for.body
- %arrayidx7 = getelementptr inbounds float, float* %out, i64 %indvars.iv
- %3 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %out, i64 %indvars.iv
+ %3 = load float, ptr %arrayidx7, align 4
%div = fdiv float %3, 3.000000e+00
- store float %div, float* %arrayidx7, align 4
+ store float %div, ptr %arrayidx7, align 4
; This load should be hoisted in spite of store
- %arrayidx9 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
- %4 = load i32, i32* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds i32, ptr %in, i64 %indvars.iv
+ %4 = load i32, ptr %arrayidx9, align 4
%conv10 = sitofp i32 %4 to float
%add13 = fadd float %div, %conv10
- store float %add13, float* %arrayidx7, align 4
+ store float %add13, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %if.then, %if.else
br label %for.end
for.end: ; preds = %entry, %for.cond.for.end_crit_edge
; Returns the output buffer pointer unchanged.
- ret float* %out
+ ret ptr %out
}
; RUN: opt -passes=gvn-hoist -S < %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-%struct.node = type { i64, %struct.node*, %struct.node*, %struct.node*, i64, %struct.arc*, i64, i64, i64 }
+%struct.node = type { i64, ptr, ptr, ptr, i64, ptr, i64, i64, i64 }
%struct.arc = type { i64, i64, i64 }
-define i64 @foo(%struct.node* nocapture readonly %r) nounwind {
+define i64 @foo(ptr nocapture readonly %r) nounwind {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
- %tobool18 = icmp eq %struct.node* %node.017, null
+ %node.0.in16 = getelementptr inbounds %struct.node, ptr %r, i64 0, i32 2
+ %node.017 = load ptr, ptr %node.0.in16, align 8
+ %tobool18 = icmp eq ptr %node.017, null
br i1 %tobool18, label %while.end, label %while.body.preheader
; CHECK-LABEL: while.body.preheader
br label %while.body
while.body: ; preds = %while.body.preheader, %if.end
- %node.020 = phi %struct.node* [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
+ %node.020 = phi ptr [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
%sum.019 = phi i64 [ %inc, %if.end ], [ 0, %while.body.preheader ]
- %orientation = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 4
- %0 = load i64, i64* %orientation, align 8
+ %orientation = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 4
+ %0 = load i64, ptr %orientation, align 8
%cmp = icmp eq i64 %0, 1
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %while.body
- %a = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
+ %a = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 5
; CHECK-NOT: load %struct.arc
- %1 = load %struct.arc*, %struct.arc** %a, align 8
- %cost = getelementptr inbounds %struct.arc, %struct.arc* %1, i64 0, i32 0
-; CHECK-NOT: load i64, i64*
- %2 = load i64, i64* %cost, align 8
- %pred = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
-; CHECK-NOT: load %struct.node*, %struct.node**
- %3 = load %struct.node*, %struct.node** %pred, align 8
- %p = getelementptr inbounds %struct.node, %struct.node* %3, i64 0, i32 6
-; CHECK-NOT: load i64, i64*
- %4 = load i64, i64* %p, align 8
+ %1 = load ptr, ptr %a, align 8
+; CHECK-NOT: load i64, ptr
+ %2 = load i64, ptr %1, align 8
+ %pred = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 1
+; CHECK-NOT: load ptr, ptr
+ %3 = load ptr, ptr %pred, align 8
+ %p = getelementptr inbounds %struct.node, ptr %3, i64 0, i32 6
+; CHECK-NOT: load i64, ptr
+ %4 = load i64, ptr %p, align 8
%add = add nsw i64 %4, %2
- %p1 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 6
; FIXME: store i64
- store i64 %add, i64* %p1, align 8
+ store i64 %add, ptr %p1, align 8
br label %if.end
; CHECK: if.else
if.else: ; preds = %while.body
- %pred2 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
-; CHECK-NOT: load %struct.node*, %struct.node**
- %5 = load %struct.node*, %struct.node** %pred2, align 8
- %p3 = getelementptr inbounds %struct.node, %struct.node* %5, i64 0, i32 6
-; CHECK-NOT: load i64, i64*
- %6 = load i64, i64* %p3, align 8
- %a4 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
-; CHECK-NOT: load %struct.arc*, %struct.arc**
- %7 = load %struct.arc*, %struct.arc** %a4, align 8
- %cost5 = getelementptr inbounds %struct.arc, %struct.arc* %7, i64 0, i32 0
-; CHECK-NOT: load i64, i64*
- %8 = load i64, i64* %cost5, align 8
+ %pred2 = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 1
+; CHECK-NOT: load ptr, ptr
+ %5 = load ptr, ptr %pred2, align 8
+ %p3 = getelementptr inbounds %struct.node, ptr %5, i64 0, i32 6
+; CHECK-NOT: load i64, ptr
+ %6 = load i64, ptr %p3, align 8
+ %a4 = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 5
+; CHECK-NOT: load ptr, ptr
+ %7 = load ptr, ptr %a4, align 8
+; CHECK-NOT: load i64, ptr
+ %8 = load i64, ptr %7, align 8
%sub = sub nsw i64 %6, %8
- %p6 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
+ %p6 = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 6
; FIXME: store i64
- store i64 %sub, i64* %p6, align 8
+ store i64 %sub, ptr %p6, align 8
br label %if.end
; CHECK: if.end
if.end: ; preds = %if.else, %if.then
; FIXME: store
%inc = add nsw i64 %sum.019, 1
- %node.0.in = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 2
- %node.0 = load %struct.node*, %struct.node** %node.0.in, align 8
- %tobool = icmp eq %struct.node* %node.0, null
+ %node.0.in = getelementptr inbounds %struct.node, ptr %node.020, i64 0, i32 2
+ %node.0 = load ptr, ptr %node.0.in, align 8
+ %tobool = icmp eq ptr %node.0, null
br i1 %tobool, label %while.end.loopexit, label %while.body
while.end.loopexit: ; preds = %if.end
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
; Reduced test: identical `store i8 0, %p` in %e.thread and %e, both feeding
; unreachable-terminated paths — exercises hoisting across a switch. NOTE
; (review): the switch statement text here is truncated by the diff/extraction
; (no closing `]` and a stray `br` follow it); preserved verbatim.
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
-define void @f(i8* %p) {
+define void @f(ptr %p) {
entry:
switch i4 undef, label %if.then30 [
i4 4, label %if.end
br i1 undef, label %e, label %e.thread
e.thread:
- store i8 0, i8* %p, align 4
+ store i8 0, ptr %p, align 4
br label %if.then30
if.then30:
unreachable
e:
- store i8 0, i8* %p, align 4
+ store i8 0, ptr %p, align 4
unreachable
}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; gvn-hoist test: both %if.then2 and %if.else3 store %c.0 through the pointer
; loaded from %d; the CHECK block after this function expects a single hoisted
; store fed by a phi. NOTE(review): %c.0 and blocks %if.end/%if.end6's preds
; reference labels not visible in this reduced fragment; preserved verbatim.
; Paired -/+ lines show the zero-index GEP folding away under opaque pointers.
-define void @test1(i1 %a, i1** %d) {
+define void @test1(i1 %a, ptr %d) {
entry:
- %0 = load i1*, i1** %d, align 8
+ %0 = load ptr, ptr %d, align 8
br i1 %a, label %if.then, label %if.else
if.then: ; preds = %entry
br i1 %c.0, label %if.then2, label %if.else3
if.then2: ; preds = %if.end
- %rc = getelementptr inbounds i1, i1* %0, i64 0
- store i1 %c.0, i1* %rc, align 4
+ store i1 %c.0, ptr %0, align 4
br label %if.end6
if.else3: ; preds = %if.end
- %rc5 = getelementptr inbounds i1, i1* %0, i64 0
- store i1 %c.0, i1* %rc5, align 4
+ store i1 %c.0, ptr %0, align 4
br label %if.end6
if.end6: ; preds = %if.else3, %if.then2
}
; CHECK-LABEL: define void @test1(
-; CHECK: %[[load:.*]] = load i1*, i1** %d, align 8
+; CHECK: %[[load:.*]] = load ptr, ptr %d, align 8
; CHECK: %[[phi:.*]] = phi i1 [ true, {{.*}} ], [ false, {{.*}} ]
-; CHECK: %[[gep0:.*]] = getelementptr inbounds i1, i1* %[[load]], i64 0
-; CHECK: store i1 %[[phi]], i1* %[[gep0]], align 4
+; CHECK: store i1 %[[phi]], ptr %[[load]], align 4
; Check that store instructions are hoisted.
; CHECK-NOT: store
\ No newline at end of file
br label %for.cond
for.cond: ; preds = %for.inc5, %entry
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp slt i32 %0, 1
br i1 %cmp, label %for.cond1, label %for.end7
for.cond1: ; preds = %for.cond, %for.inc
- %1 = load i32, i32* @a, align 4
+ %1 = load i32, ptr @a, align 4
%cmp2 = icmp slt i32 %1, 1
br i1 %cmp2, label %for.body3, label %for.inc5
if.then: ; preds = %for.body3
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* @a, align 4
+ store i32 %inc, ptr @a, align 4
br label %for.inc
for.inc: ; preds = %for.body3, %if.then
- %2 = load i32, i32* @a, align 4
+ %2 = load i32, ptr @a, align 4
%inc4 = add nsw i32 %2, 1
- store i32 %inc4, i32* @a, align 4
+ store i32 %inc4, ptr @a, align 4
br label %for.cond1
for.inc5: ; preds = %for.cond1
%inc6 = add nsw i32 %1, 1
- store i32 %inc6, i32* @a, align 4
+ store i32 %inc6, ptr @a, align 4
br label %for.cond
for.end7: ; preds = %for.cond
; CHECK: store
-%struct._MUSIC_OP_API_ = type { %struct._FILE_OPERATE_*, %struct.__MUSIC_API* }
-%struct._FILE_OPERATE_ = type { %struct._FILE_OPERATE_INIT_*, %struct._lg_dev_info_* }
-%struct._FILE_OPERATE_INIT_ = type { i32, i32, i32, i32, i32*, i8*, i32 }
-%struct._lg_dev_info_ = type { %struct.os_event, i32, i32, %struct._lg_dev_hdl_*, i8, i8, i8, i8, i8 }
-%struct.os_event = type { i8, i32, i8*, %union.anon }
+%struct._MUSIC_OP_API_ = type { ptr, ptr }
+%struct._FILE_OPERATE_ = type { ptr, ptr }
+%struct._FILE_OPERATE_INIT_ = type { i32, i32, i32, i32, ptr, ptr, i32 }
+%struct._lg_dev_info_ = type { %struct.os_event, i32, i32, ptr, i8, i8, i8, i8, i8 }
+%struct.os_event = type { i8, i32, ptr, %union.anon }
%union.anon = type { %struct.event_cnt }
%struct.event_cnt = type { i16 }
-%struct._lg_dev_hdl_ = type { i8*, i8*, i8*, i8*, i8* }
-%struct.__MUSIC_API = type <{ i8*, i8*, i32, %struct._DEC_API, %struct._DEC_API_IO*, %struct._FS_BRK_POINT* }>
-%struct._DEC_API = type { %struct._DEC_PHY*, i8*, i8*, i8* (i8*)*, i32* (i8*)*, i8*, %struct._AAC_DEFAULT_SETTING, i32, i32, i8*, %struct.decoder_inf*, i32, i8, i8*, i8, i8* }
-%struct._DEC_PHY = type { i8*, %struct.__audio_decoder_ops*, i8*, %struct.if_decoder_io, %struct.if_dec_file*, i8*, i32 (i8*)*, i32, i8, %struct.__FF_FR }
-%struct.__audio_decoder_ops = type { i8*, i32 (i8*, %struct.if_decoder_io*, i8*)*, i32 (i8*)*, i32 (i8*, i32)*, %struct.decoder_inf* (i8*)*, i32 (i8*)*, i32 (i8*)*, i32 (...)*, i32 (...)*, i32 (...)*, void (i8*, i32)*, void (i8*, i32, i8*, i32)*, i32 (i8*, i32, i8*)* }
-%struct.if_decoder_io = type { i8*, i32 (i8*, i32, i8*, i32, i8)*, i32 (i8*, i32, i8*)*, void (i8*, i8*, i32)*, i32 (i8*)*, i32 (i8*, i32, i32)* }
-%struct.if_dec_file = type { i32 (i8*, i8*, i32)*, i32 (i8*, i32, i32)* }
+%struct._lg_dev_hdl_ = type { ptr, ptr, ptr, ptr, ptr }
+%struct.__MUSIC_API = type <{ ptr, ptr, i32, %struct._DEC_API, ptr, ptr }>
+%struct._DEC_API = type { ptr, ptr, ptr, ptr, ptr, ptr, %struct._AAC_DEFAULT_SETTING, i32, i32, ptr, ptr, i32, i8, ptr, i8, ptr }
+%struct._DEC_PHY = type { ptr, ptr, ptr, %struct.if_decoder_io, ptr, ptr, ptr, i32, i8, %struct.__FF_FR }
+%struct.__audio_decoder_ops = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%struct.if_decoder_io = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%struct.if_dec_file = type { ptr, ptr }
%struct.__FF_FR = type { i32, i32, i8, i8, i8 }
%struct._AAC_DEFAULT_SETTING = type { i32, i32, i32 }
%struct.decoder_inf = type { i16, i16, i32, i32 }
-%struct._DEC_API_IO = type { i8*, i8*, i16 (i8*, i8*, i16)*, i32 (i8*, i8, i32)*, i32 (%struct.decoder_inf*, i32)*, %struct.__OP_IO, i32, i32 }
-%struct.__OP_IO = type { i8*, i8* (i8*, i8*, i32)* }
+%struct._DEC_API_IO = type { ptr, ptr, ptr, ptr, ptr, %struct.__OP_IO, i32, i32 }
+%struct.__OP_IO = type { ptr, ptr }
%struct._FS_BRK_POINT = type { %struct._FS_BRK_INFO, i32, i32 }
%struct._FS_BRK_INFO = type { i32, i32, [8 x i8], i8, i8, i16 }
@.str = external hidden unnamed_addr constant [10 x i8], align 1
; gvn-hoist test: every switch arm (sw.bb, sw.bb7, sw.bb13, sw.default)
; repeats the same chain — load %mapi, GEP to field 1 (dop_api), load, GEP to
; field 2 (file_num) — making those loads/GEPs hoisting candidates into
; %while.cond2. NOTE(review): the `switch` that feeds these arms is cut out of
; this fragment (only its closing `]` remains at the while.cond2 block).
; Paired -/+ lines record the opaque-pointer migration; note the + side drops
; the bitcasts and renumbers the unnamed values (%0..%9).
-define void @music_task(i8* nocapture readnone %p) local_unnamed_addr {
+define void @music_task(ptr nocapture readnone %p) local_unnamed_addr {
entry:
- %mapi = alloca %struct._MUSIC_OP_API_*, align 8
- %0 = bitcast %struct._MUSIC_OP_API_** %mapi to i8*
- call void @llvm.lifetime.start.p0i8(i64 8, i8* %0)
- store %struct._MUSIC_OP_API_* null, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %call = call i32 @music_decoder_init(%struct._MUSIC_OP_API_** nonnull %mapi)
+ %mapi = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 8, ptr %mapi)
+ store ptr null, ptr %mapi, align 8, !tbaa !1
+ %call = call i32 @music_decoder_init(ptr nonnull %mapi)
br label %while.cond
while.cond.loopexit: ; preds = %while.cond2
br label %while.cond
while.cond: ; preds = %while.cond.loopexit, %entry
; First instance of the mapi -> dop_api -> file_num chain.
- %1 = load %struct._MUSIC_OP_API_*, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %dop_api = getelementptr inbounds %struct._MUSIC_OP_API_, %struct._MUSIC_OP_API_* %1, i64 0, i32 1
- %2 = load %struct.__MUSIC_API*, %struct.__MUSIC_API** %dop_api, align 8, !tbaa !5
- %file_num = getelementptr inbounds %struct.__MUSIC_API, %struct.__MUSIC_API* %2, i64 0, i32 2
- %3 = bitcast i32* %file_num to i8*
- %call1 = call i32 @music_play_api(%struct._MUSIC_OP_API_* %1, i32 33, i32 0, i32 28, i8* %3)
+ %0 = load ptr, ptr %mapi, align 8, !tbaa !1
+ %dop_api = getelementptr inbounds %struct._MUSIC_OP_API_, ptr %0, i64 0, i32 1
+ %1 = load ptr, ptr %dop_api, align 8, !tbaa !5
+ %file_num = getelementptr inbounds %struct.__MUSIC_API, ptr %1, i64 0, i32 2
+ %call1 = call i32 @music_play_api(ptr %0, i32 33, i32 0, i32 28, ptr %file_num)
br label %while.cond2
while.cond2: ; preds = %while.cond2.backedge, %while.cond
]
sw.bb: ; preds = %while.cond2
- %4 = load %struct._MUSIC_OP_API_*, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %dop_api4 = getelementptr inbounds %struct._MUSIC_OP_API_, %struct._MUSIC_OP_API_* %4, i64 0, i32 1
- %5 = load %struct.__MUSIC_API*, %struct.__MUSIC_API** %dop_api4, align 8, !tbaa !5
- %file_num5 = getelementptr inbounds %struct.__MUSIC_API, %struct.__MUSIC_API* %5, i64 0, i32 2
- %6 = load i32, i32* %file_num5, align 1, !tbaa !7
- %call6 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i64 0, i64 0), i32 %6)
+ %2 = load ptr, ptr %mapi, align 8, !tbaa !1
+ %dop_api4 = getelementptr inbounds %struct._MUSIC_OP_API_, ptr %2, i64 0, i32 1
+ %3 = load ptr, ptr %dop_api4, align 8, !tbaa !5
+ %file_num5 = getelementptr inbounds %struct.__MUSIC_API, ptr %3, i64 0, i32 2
+ %4 = load i32, ptr %file_num5, align 1, !tbaa !7
+ %call6 = call i32 (ptr, ...) @printf(ptr @.str, i32 %4)
br label %while.cond2.backedge
sw.bb7: ; preds = %while.cond2
- %7 = load %struct._MUSIC_OP_API_*, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %dop_api8 = getelementptr inbounds %struct._MUSIC_OP_API_, %struct._MUSIC_OP_API_* %7, i64 0, i32 1
- %8 = load %struct.__MUSIC_API*, %struct.__MUSIC_API** %dop_api8, align 8, !tbaa !5
- %file_num9 = getelementptr inbounds %struct.__MUSIC_API, %struct.__MUSIC_API* %8, i64 0, i32 2
- store i32 1, i32* %file_num9, align 1, !tbaa !7
- %9 = bitcast i32* %file_num9 to i8*
- %call12 = call i32 @music_play_api(%struct._MUSIC_OP_API_* %7, i32 34, i32 0, i32 24, i8* %9)
+ %5 = load ptr, ptr %mapi, align 8, !tbaa !1
+ %dop_api8 = getelementptr inbounds %struct._MUSIC_OP_API_, ptr %5, i64 0, i32 1
+ %6 = load ptr, ptr %dop_api8, align 8, !tbaa !5
+ %file_num9 = getelementptr inbounds %struct.__MUSIC_API, ptr %6, i64 0, i32 2
+ store i32 1, ptr %file_num9, align 1, !tbaa !7
+ %call12 = call i32 @music_play_api(ptr %5, i32 34, i32 0, i32 24, ptr %file_num9)
br label %while.cond2.backedge
sw.bb13: ; preds = %while.cond2
- %10 = load %struct._MUSIC_OP_API_*, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %dop_api14 = getelementptr inbounds %struct._MUSIC_OP_API_, %struct._MUSIC_OP_API_* %10, i64 0, i32 1
- %11 = load %struct.__MUSIC_API*, %struct.__MUSIC_API** %dop_api14, align 8, !tbaa !5
- %file_num15 = getelementptr inbounds %struct.__MUSIC_API, %struct.__MUSIC_API* %11, i64 0, i32 2
- store i32 1, i32* %file_num15, align 1, !tbaa !7
- %12 = bitcast i32* %file_num15 to i8*
- %call18 = call i32 @music_play_api(%struct._MUSIC_OP_API_* %10, i32 35, i32 0, i32 26, i8* %12)
+ %7 = load ptr, ptr %mapi, align 8, !tbaa !1
+ %dop_api14 = getelementptr inbounds %struct._MUSIC_OP_API_, ptr %7, i64 0, i32 1
+ %8 = load ptr, ptr %dop_api14, align 8, !tbaa !5
+ %file_num15 = getelementptr inbounds %struct.__MUSIC_API, ptr %8, i64 0, i32 2
+ store i32 1, ptr %file_num15, align 1, !tbaa !7
+ %call18 = call i32 @music_play_api(ptr %7, i32 35, i32 0, i32 26, ptr %file_num15)
br label %while.cond2.backedge
sw.default: ; preds = %while.cond2
- %13 = load %struct._MUSIC_OP_API_*, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
- %call19 = call i32 @music_play_api(%struct._MUSIC_OP_API_* %13, i32 33, i32 0, i32 22, i8* null)
+ %9 = load ptr, ptr %mapi, align 8, !tbaa !1
+ %call19 = call i32 @music_play_api(ptr %9, i32 33, i32 0, i32 22, ptr null)
br label %while.cond2.backedge
while.cond2.backedge: ; preds = %sw.default, %sw.bb13, %sw.bb7, %sw.bb
br label %while.cond2
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare i32 @music_decoder_init(%struct._MUSIC_OP_API_**)
-declare i32 @music_play_api(%struct._MUSIC_OP_API_*, i32, i32, i32, i8*)
-declare i32 @printf(i8* nocapture readonly, ...)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare i32 @music_decoder_init(ptr)
+declare i32 @music_play_api(ptr, i32, i32, i32, ptr)
+declare i32 @printf(ptr nocapture readonly, ...)
!0 = !{!"clang version 4.0.0 "}
!1 = !{!2, !2, i64 0}
; Make sure the two stores @B do not get hoisted past the load @B.
-; CHECK-LABEL: define i8* @Foo
+; CHECK-LABEL: define ptr @Foo
; CHECK: store
; CHECK: store
; CHECK: load
; CHECK: store
@A = external global i8
-@B = external global i8*
+@B = external global ptr
; gvn-hoist negative test (see comment above): the two `store ptr null, @B`
; must NOT be hoisted above the `load @B` in %if.else — the expected
; store/store/load/store order is pinned by the CHECK lines before @Foo.
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
-define i8* @Foo() {
- store i8 0, i8* @A
+define ptr @Foo() {
+ store i8 0, ptr @A
br i1 undef, label %if.then, label %if.else
if.then:
- store i8* null, i8** @B
- ret i8* null
+ store ptr null, ptr @B
+ ret ptr null
if.else:
; This load reads the pre-store value of @B; hoisting the store past it
; would change the returned value.
- %1 = load i8*, i8** @B
- store i8* null, i8** @B
- ret i8* %1
+ %1 = load ptr, ptr @B
+ store ptr null, ptr @B
+ ret ptr %1
}
; Make sure the two stores @B do not get hoisted past the store @GlobalVar.
-; CHECK-LABEL: define i8* @Fun
+; CHECK-LABEL: define ptr @Fun
; CHECK: store
; CHECK: store
; CHECK: store
@GlobalVar = internal global i8 0
; gvn-hoist negative test (see comment above): the stores to @B must not be
; hoisted past the intervening `store i8 0, @GlobalVar` in %if.else; the
; three-store sequence is pinned by the CHECK lines before @Fun.
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
-define i8* @Fun() {
- store i8 0, i8* @A
+define ptr @Fun() {
+ store i8 0, ptr @A
br i1 undef, label %if.then, label %if.else
if.then:
- store i8* null, i8** @B
- ret i8* null
+ store ptr null, ptr @B
+ ret ptr null
if.else:
- store i8 0, i8* @GlobalVar
- store i8* null, i8** @B
- %1 = load i8*, i8** @B
- ret i8* %1
+ store i8 0, ptr @GlobalVar
+ store ptr null, ptr @B
+ %1 = load ptr, ptr @B
+ ret ptr %1
}
; Hoisting test: the load-of-%a / store-i8-0 pair appears in both %entry and
; %if.end, separated by a call to external @_Z3fn1v — presumably checking
; whether the pair can be merged across the call; confirm against the RUN
; line outside this chunk. Note the loads read uninitialized allocas (reduced
; test, preserved as-is).
; Paired -/+ lines record the typed-pointer -> opaque `ptr` migration.
define void @_Z3fn2v() #0 {
entry:
- %a = alloca i8*, align 8
+ %a = alloca ptr, align 8
%b = alloca i32, align 4
- %0 = load i8*, i8** %a, align 8
- store i8 0, i8* %0, align 1
- %1 = load i32, i32* %b, align 4
+ %0 = load ptr, ptr %a, align 8
+ store i8 0, ptr %0, align 1
+ %1 = load i32, ptr %b, align 4
%tobool = icmp ne i32 %1, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
%call = call i64 @_Z3fn1v() #2
%conv = trunc i64 %call to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
; Second instance of the load/store pair — the redundancy under test.
- %2 = load i8*, i8** %a, align 8
- store i8 0, i8* %2, align 1
+ %2 = load ptr, ptr %a, align 8
+ store i8 0, ptr %2, align 1
ret void
}
br label %do.body
do.body: ; preds = %do.body, %entry
- %tmp9 = load i32, i32* @heap, align 4
+ %tmp9 = load i32, ptr @heap, align 4
%cmp = call i1 @pqdownheap(i32 %tmp9)
br i1 %cmp, label %do.body, label %do.end
do.end: ; preds = %do.body
- %tmp20 = load i32, i32* @heap, align 4
+ %tmp20 = load i32, ptr @heap, align 4
ret i32 %tmp20
}
br label %for.cond
for.cond:
- %a3 = load volatile i1, i1* @v
+ %a3 = load volatile i1, ptr @v
br i1 %a3, label %for.body, label %while.end
for.body:
br label %if.then
if.then:
- %tmp4 = load i32, i32* @i, align 4
+ %tmp4 = load i32, ptr @i, align 4
br label %for.cond
while.end:
br label %do.body
do.body:
- %tmp9 = load i32, i32* getelementptr inbounds ([573 x i32], [573 x i32]* @j,
+ %tmp9 = load i32, ptr getelementptr inbounds ([573 x i32], ptr @j,
i32 0, i32 1), align 4
- %tmp10 = load i32, i32* @i, align 4
+ %tmp10 = load i32, ptr @i, align 4
call void @fn()
- %a1 = load volatile i1, i1* @v
+ %a1 = load volatile i1, ptr @v
br i1 %a1, label %do.body, label %do.end
do.end:
- %tmp20 = load i32, i32* getelementptr inbounds ([573 x i32], [573 x i32]* @j,
+ %tmp20 = load i32, ptr getelementptr inbounds ([573 x i32], ptr @j,
i32 0, i32 1), align 4
ret i32 %tmp20
}
; RUN: opt < %s -passes=gvn-hoist -S | FileCheck %s
-@g = external constant i8*
+@g = external constant ptr
declare i32 @gxx_personality(...)
declare void @f0()
;CHECK-LABEL: @func
; gvn-hoist with exception handling: loads of `ptr undef` in blocks %4 and %6
; must stay below their dominating invokes (inline CHECK-NEXT lines pin the
; load/invoke order), since an invoke can unwind to %1. Uselistorder
; directives at the bottom exercise use-list serialization. NOTE(review):
; block %9's terminator appears truncated by the diff (the invoke producing
; `to label %11` has no visible call line); preserved verbatim.
; Paired -/+ lines record the typed-pointer migration; note the personality
; and catch bitcasts fold away under opaque pointers.
-define void @func() personality i8* bitcast (i32 (...)* @gxx_personality to i8*) {
+define void @func() personality ptr @gxx_personality {
invoke void @f0()
to label %3 unwind label %1
1:
- %2 = landingpad { i8*, i32 }
- catch i8* bitcast (i8** @g to i8*)
- catch i8* null
+ %2 = landingpad { ptr, i32 }
+ catch ptr @g
+ catch ptr null
br label %16
3:
br i1 undef, label %4, label %10
;CHECK: 4:
-;CHECK-NEXT: %5 = load i32*, i32** undef, align 8
+;CHECK-NEXT: %5 = load ptr, ptr undef, align 8
;CHECK-NEXT: invoke void @f1()
4:
- %5 = load i32*, i32** undef, align 8
+ %5 = load ptr, ptr undef, align 8
invoke void @f1()
to label %6 unwind label %1
;CHECK: 6:
-;CHECK-NEXT: %7 = load i32*, i32** undef, align 8
-;CHECK-NEXT: %8 = load i32*, i32** undef, align 8
+;CHECK-NEXT: %7 = load ptr, ptr undef, align 8
+;CHECK-NEXT: %8 = load ptr, ptr undef, align 8
6:
- %7 = load i32*, i32** undef, align 8
- %8 = load i32*, i32** undef, align 8
+ %7 = load ptr, ptr undef, align 8
+ %8 = load ptr, ptr undef, align 8
br i1 true, label %9, label %17
9:
to label %11 unwind label %1
11:
- %12 = invoke signext i32 undef(i32* null, i32 signext undef, i1 zeroext undef)
+ %12 = invoke signext i32 undef(ptr null, i32 signext undef, i1 zeroext undef)
to label %13 unwind label %14
13:
unreachable
14:
- %15 = landingpad { i8*, i32 }
- catch i8* bitcast (i8** @g to i8*)
- catch i8* null
+ %15 = landingpad { ptr, i32 }
+ catch ptr @g
+ catch ptr null
br label %16
16:
ret void
; uselistorder directives
- uselistorder void ()* @f0, { 1, 0 }
+ uselistorder ptr @f0, { 1, 0 }
uselistorder label %1, { 0, 3, 1, 2 }
}
; FIXME: Hoist loads from bb58 and bb45 to bb41.
@g_10 = external global i32, align 4
-@g_536 = external global i8*, align 8
-@g_1629 = external global i32**, align 8
-@g_963 = external global i32**, align 8
-@g_1276 = external global i32**, align 8
+@g_536 = external global ptr, align 8
+@g_1629 = external global ptr, align 8
+@g_963 = external global ptr, align 8
+@g_1276 = external global ptr, align 8
;CHECK-LABEL: @func_22
-define void @func_22(i32* %arg, i32* %arg1) {
+define void @func_22(ptr %arg, ptr %arg1) {
bb:
br label %bb12
bb15:
%tmp183 = trunc i16 0 to i8
- %tmp20 = load i8*, i8** @g_536, align 8
- %tmp21 = load i8, i8* %tmp20, align 1
+ %tmp20 = load ptr, ptr @g_536, align 8
+ %tmp21 = load i8, ptr %tmp20, align 1
%tmp23 = or i8 %tmp21, %tmp183
- store i8 %tmp23, i8* %tmp20, align 1
+ store i8 %tmp23, ptr %tmp20, align 1
%tmp5.i = icmp eq i8 %tmp23, 0
br i1 %tmp5.i, label %safe_div_func_uint8_t_u_u.exit, label %bb8.i
;CHECK: bb41:
bb41:
- %tmp43 = load i32, i32* %arg, align 4
+ %tmp43 = load i32, ptr %arg, align 4
%tmp44 = icmp eq i32 %tmp43, 0
br i1 %tmp44, label %bb52, label %bb45
;CHECK: bb45:
-;CHECK: %tmp47 = load i32, i32* %arg1, align 4
+;CHECK: %tmp47 = load i32, ptr %arg1, align 4
;CHECK: %tmp48 = icmp eq i32 %tmp47, 0
bb45:
- %tmp47 = load i32, i32* %arg1, align 4
+ %tmp47 = load i32, ptr %arg1, align 4
%tmp48 = icmp eq i32 %tmp47, 0
br i1 %tmp48, label %bb50, label %bb64
bb50:
- %tmp51 = load volatile i32**, i32*** @g_963, align 8
+ %tmp51 = load volatile ptr, ptr @g_963, align 8
unreachable
bb52:
br label %bb52
;CHECK: bb58:
-;CHECK: %tmp60 = load i32, i32* %arg1, align 4
+;CHECK: %tmp60 = load i32, ptr %arg1, align 4
;CHECK: %tmp61 = icmp eq i32 %tmp60, 0
;CHECK: bb62:
;CHECK: load
;CHECK: load
bb58:
- %tmp60 = load i32, i32* %arg1, align 4
+ %tmp60 = load i32, ptr %arg1, align 4
%tmp61 = icmp eq i32 %tmp60, 0
br i1 %tmp61, label %bb62, label %bb64
bb62:
- %tmp63 = load volatile i32**, i32*** @g_1276, align 8
+ %tmp63 = load volatile ptr, ptr @g_1276, align 8
unreachable
bb64:
- %tmp65 = load volatile i32**, i32*** @g_1629, align 8
+ %tmp65 = load volatile ptr, ptr @g_1629, align 8
unreachable
; uselistorder directives
uselistorder i32 %spec.select, { 1, 0 }
- uselistorder i32* %arg1, { 1, 0 }
+ uselistorder ptr %arg1, { 1, 0 }
uselistorder label %bb64, { 1, 0 }
uselistorder label %bb52, { 1, 0 }
uselistorder label %bb41, { 1, 0 }
; GVN-hoist store test: the identical "store i64 0" in bb7 and bb8 should be
; hoisted into a common predecessor (bb6 per the CHECK lines) and must not
; remain in either successor (CHECK-NOT).
define void @func() {
; CHECK-LABEL: @func()
; CHECK: bb6:
-; CHECK: store i64 0, i64* undef, align 8
+; CHECK: store i64 0, ptr undef, align 8
; CHECK: bb7:
-; CHECK-NOT: store i64 0, i64* undef, align 8
+; CHECK-NOT: store i64 0, ptr undef, align 8
; CHECK: bb8:
-; CHECK-NOT: store i64 0, i64* undef, align 8
+; CHECK-NOT: store i64 0, ptr undef, align 8
entry:
  br label %bb1
; NOTE(review): the blocks between bb1 and this conditional branch (including
; the bb6 named in the CHECK lines) are not visible in this excerpt.
  br i1 undef, label %bb7, label %bb8
bb7:
-  store i64 0, i64* undef, align 8
+  store i64 0, ptr undef, align 8
  unreachable
bb8:
-  store i64 0, i64* undef, align 8
+  store i64 0, ptr undef, align 8
  ret void
}
;CHECK-LABEL: @foo
; Store-sinking test: the identical store pairs in bb2 and bb3 are expected to
; be merged into bb1 (see CHECK lines) and removed from both blocks.  The
; opaque-pointer migration drops the bitcast and the all-zero-index GEP, so
; the unnamed value numbering shifts (%0/%1/%2 become %0/%1).
-define void @foo(i32* %arg) {
+define void @foo(ptr %arg) {
bb0:
-  %0 = bitcast i32* %arg to %S*
-  %call.idx.i = getelementptr %S, %S* %0, i64 0, i32 0, i32 0
-  %call.idx.val.i = load i32, i32* %call.idx.i
+  %call.idx.val.i = load i32, ptr %arg
  br label %bb1
;CHECK: bb1:
;CHECK: %call264 = call zeroext i1 @bar
-;CHECK: store i32 %call.idx.val.i, i32* %call.idx.i
-;CHECK: %1 = getelementptr inbounds %S, %S* %0, i64 0, i32 0, i32 1
-;CHECK: store i64 undef, i64* %1
+;CHECK: store i32 %call.idx.val.i, ptr %arg
+;CHECK: %0 = getelementptr inbounds %S, ptr %arg, i64 0, i32 0, i32 1
+;CHECK: store i64 undef, ptr %0
;CHECK: br i1 %call264, label %bb2, label %bb3
bb1:
; NOTE(review): %call264 is used here but its defining call is not visible in
; this excerpt.
  br i1 %call264, label %bb2, label %bb3
;CHECK: bb2:
-;CHECK-NOT: store i32 %call.idx.val.i, i32* %call.idx.i
-;CHECK-NOT: store i64 undef, i64* %{.*}
+;CHECK-NOT: store i32 %call.idx.val.i, ptr %arg
+;CHECK-NOT: store i64 undef, ptr %{.*}
bb2:
-  store i32 %call.idx.val.i, i32* %call.idx.i
-  %1 = getelementptr inbounds %S, %S* %0, i64 0, i32 0, i32 1
-  store i64 undef, i64* %1
+  store i32 %call.idx.val.i, ptr %arg
+  %0 = getelementptr inbounds %S, ptr %arg, i64 0, i32 0, i32 1
+  store i64 undef, ptr %0
  ret void
;CHECK: bb3:
-;CHECK-NOT: store i32 %call.idx.val.i, i32* %call.idx.i
-;CHECK-NOT: store i64 undef, i64* %{.*}
+;CHECK-NOT: store i32 %call.idx.val.i, ptr %arg
+;CHECK-NOT: store i64 undef, ptr %{.*}
bb3:
-  store i32 %call.idx.val.i, i32* %call.idx.i
-  %2 = getelementptr inbounds %S, %S* %0, i64 0, i32 0, i32 1
-  store i64 undef, i64* %2
+  store i32 %call.idx.val.i, ptr %arg
+  %1 = getelementptr inbounds %S, ptr %arg, i64 0, i32 0, i32 1
+  store i64 undef, ptr %1
  ret void
}
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB4_I:%.*]]
; CHECK: bb4.i:
-; CHECK-NEXT: [[I1_I:%.*]] = load volatile i32, i32* @g, align 4
+; CHECK-NEXT: [[I1_I:%.*]] = load volatile i32, ptr @g, align 4
; CHECK-NEXT: [[I32_I:%.*]] = icmp eq i32 [[I1_I]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[I32_I]])
-; CHECK-NEXT: [[I1_I_1:%.*]] = load volatile i32, i32* @g, align 4
+; CHECK-NEXT: [[I1_I_1:%.*]] = load volatile i32, ptr @g, align 4
; CHECK-NEXT: [[I32_I_1:%.*]] = icmp eq i32 [[I1_I_1]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[I32_I_1]])
-; CHECK-NEXT: [[I1_I_2:%.*]] = load volatile i32, i32* @g, align 4
+; CHECK-NEXT: [[I1_I_2:%.*]] = load volatile i32, ptr @g, align 4
; CHECK-NEXT: [[I32_I_2:%.*]] = icmp eq i32 [[I1_I_2]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[I32_I_2]])
; CHECK-NEXT: br label [[BB4_I]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NEXT: unreachable
;
bb:
- %i1.i = load volatile i32, i32* @g
+ %i1.i = load volatile i32, ptr @g
%i32.i = icmp eq i32 %i1.i, 0
call void @llvm.assume(i1 %i32.i) #3
br label %bb4.i
bb4.i: ; preds = %bb4.i, %bb
- %i.i = load volatile i32, i32* @g
+ %i.i = load volatile i32, ptr @g
%i3.i = icmp eq i32 %i.i, 0
call void @llvm.assume(i1 %i3.i) #3
br label %bb4.i
declare i8 @ext(i1)
; Sinking test with an indirect call through the %ext argument.
; NOTE(review): only the function skeleton is visible here; the if.then /
; if.else bodies and the definition of %tobool4 are elided from this excerpt.
-define zeroext i1 @test1(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, i8(i1)* %ext) {
+define zeroext i1 @test1(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, ptr %ext) {
entry:
  %cmp = icmp uge i32 %blksA, %nblks
  br i1 %flag, label %if.then, label %if.else
  ret i1 %tobool4
}
; Same skeleton as @test1; the interior blocks are elided from this excerpt.
-define zeroext i1 @test2(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, i8(i1)* %ext) {
+define zeroext i1 @test2(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, ptr %ext) {
entry:
  %cmp = icmp uge i32 %blksA, %nblks
  br i1 %flag, label %if.then, label %if.else
  ret i1 %tobool4
}
-define zeroext i1 @test3(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, i8(i1)* %ext1, i8(i1)* %ext2) {
+define zeroext i1 @test3(i1 zeroext %flag, i32 %blksA, i32 %blksB, i32 %nblks, ptr %ext1, ptr %ext2) {
entry:
%cmp = icmp uge i32 %blksA, %nblks
br i1 %flag, label %if.then, label %if.else
; CHECK-LABEL: test3
-; CHECK: %[[x:.*]] = select i1 %flag, i8 (i1)* %ext1, i8 (i1)* %ext2
+; CHECK: %[[x:.*]] = select i1 %flag, ptr %ext1, ptr %ext2
; CHECK: call i8 %[[x]](i1 %cmp)
; CHECK-NOT: call
if.then:
; PR42346 regression test: the calls through struct-pointer / function-pointer
; types must survive unchanged.  Note the "if:" block has no predecessor and
; is unreachable from entry.
define void @PR42346() {
; CHECK-LABEL: @PR42346(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL1:%.*]] = call %vec* @bar(%map* undef, %vec* (%map*)* undef)
+; CHECK-NEXT: [[CALL1:%.*]] = call ptr @bar(ptr undef, ptr undef)
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: if:
-; CHECK-NEXT: [[CALL2:%.*]] = call %vec* @baz(%map* undef)
+; CHECK-NEXT: [[CALL2:%.*]] = call ptr @baz(ptr undef)
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
-  %call1 = call %vec* @bar(%map* undef, %vec* (%map*)* undef)
+  %call1 = call ptr @bar(ptr undef, ptr undef)
  br label %exit
if:
-  %call2 = call %vec* @baz(%map* undef)
+  %call2 = call ptr @baz(ptr undef)
  br label %exit
exit:
  ret void
}
; With opaque pointers the distinct %vec*/%map* signatures collapse to ptr.
-declare %vec* @bar(%map*, %vec* (%map*)*)
-declare %vec* @baz(%map*)
+declare ptr @bar(ptr, ptr)
+declare ptr @baz(ptr)
; Debug-info test: @b carries a !dbg attachment (!2); @a and the metadata
; nodes it references are defined outside this excerpt.
@b = internal global i32 2, !dbg !2
; Loads keep @a and @b alive so their debug expressions must be preserved.
define void @use1() {
-  %x = load i32, i32* @a
-  %y = load i32, i32* @b
+  %x = load i32, ptr @a
+  %y = load i32, ptr @b
  ret void
}
; CHECK: [[A]] = !DIGlobalVariableExpression(var: [[AVAR:![0-9]+]], expr: !DIExpression())
; RUN: opt -passes=globalopt -S -o - < %s | FileCheck %s
; @glbl is internal, so globalopt may reason about all stores to it.
-@glbl = internal global i8* null
+@glbl = internal global ptr null
; Storing the existing initializer (null) is a no-op and must be removed.
define void @test1a() {
; CHECK-LABEL: @test1a(
; CHECK-NOT: store
; CHECK-NEXT: ret void
-  store i8* null, i8** @glbl
+  store ptr null, ptr @glbl
  ret void
}
; Storing an unknown incoming pointer is observable and must be kept.
-define void @test1b(i8* %p) {
+define void @test1b(ptr %p) {
; CHECK-LABEL: @test1b(
; CHECK-NEXT: store
; CHECK-NEXT: ret void
-  store i8* %p, i8** @glbl
+  store ptr %p, ptr @glbl
  ret void
}
; CHECK-LABEL: @test2(
; CHECK: alloca i8
%txt = alloca i8
- call void @foo2(i8* %txt)
- %call2 = call i8* @strdup(i8* %txt)
- store i8* %call2, i8** @glbl
+ call void @foo2(ptr %txt)
+ %call2 = call ptr @strdup(ptr %txt)
+ store ptr %call2, ptr @glbl
ret void
}
-declare i8* @strdup(i8*)
-declare void @foo2(i8*)
+declare ptr @strdup(ptr)
+declare void @foo2(ptr)
; Store to @glbl on the normal path of an invoke: the CHECK lines require the
; invoke to survive while neither basic-block label remains in the output.
-define void @test3() uwtable personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0 {
+define void @test3() uwtable personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test3(
; CHECK-NOT: bb1:
; CHECK-NOT: bb2:
; CHECK: invoke
-  %ptr = invoke i8* @_Znwm(i64 1)
+  %ptr = invoke ptr @_Znwm(i64 1)
          to label %bb1 unwind label %bb2
bb1:
-  store i8* %ptr, i8** @glbl
+  store ptr %ptr, ptr @glbl
  unreachable
bb2:
-  %tmp1 = landingpad { i8*, i32 }
+  %tmp1 = landingpad { ptr, i32 }
          cleanup
-  resume { i8*, i32 } %tmp1
+  resume { ptr, i32 } %tmp1
}
; Itanium C++ personality routine and operator new (mangled _Znwm).
-declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
-declare i8* @_Znwm(i64)
+declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr)
+declare ptr @_Znwm(i64)
; RUN: opt -passes=globalopt -S -o - < %s | FileCheck %s
; The check here is that it doesn't crash.
; llvm.invariant.start used inside a static constructor; the opaque-pointer
; form drops the bitcast of @object1 and renames the intrinsic to .p0.
-declare {}* @llvm.invariant.start.p0i8(i64 %size, i8* nocapture %ptr)
+declare ptr @llvm.invariant.start.p0(i64 %size, ptr nocapture %ptr)
@object1 = global { i32, i32 } zeroinitializer
; CHECK: @object1 = global { i32, i32 } zeroinitializer
define void @ctor1() {
-  %ptr = bitcast {i32, i32}* @object1 to i8*
-  call {}* @llvm.invariant.start.p0i8(i64 4, i8* %ptr)
+  call ptr @llvm.invariant.start.p0(i64 4, ptr @object1)
  ret void
}
; Register @ctor1 as a static constructor at default priority (65535).
@llvm.global_ctors = appending constant
-  [1 x { i32, void ()*, i8* }]
-  [ { i32, void ()*, i8* } { i32 65535, void ()* @ctor1, i8* null } ]
+  [1 x { i32, ptr, ptr }]
+  [ { i32, ptr, ptr } { i32 65535, ptr @ctor1, ptr null } ]
@c = common global i32 0, align 4
@h = common global i32 0, align 4
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #0
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
-declare i32* @m()
+declare ptr @m()
; CHECK-LABEL: define void @main()
; CHECK-NEXT: bb:
; CHECK-NEXT: %.sroa.4.i = alloca [20 x i8], align 2
; CHECK-NEXT: %.sroa.5.i = alloca [6 x i8], align 8
-; CHECK-NEXT: %i = bitcast [6 x i8]* %.sroa.5.i to i8*
define void @main() #2 {
bb:
%.sroa.4.i = alloca [20 x i8], align 2
%.sroa.5.i = alloca [6 x i8], align 8
- %i = bitcast [6 x i8]* %.sroa.5.i to i8*
- %i1 = load i32, i32* @h, align 4, !tbaa !4
+ %i1 = load i32, ptr @h, align 4, !tbaa !4
%i2 = icmp ne i32 %i1, 0
br i1 %i2, label %bb11, label %bb3
bb3: ; preds = %bb
- %i4 = call i32* @m()
- %.sroa.4.0..sroa_idx21.i = getelementptr inbounds [20 x i8], [20 x i8]* %.sroa.4.i, i64 0, i64 0
- call void @llvm.lifetime.start.p0i8(i64 20, i8* %.sroa.4.0..sroa_idx21.i)
- %.sroa.5.0..sroa_idx16.i = getelementptr inbounds [6 x i8], [6 x i8]* %.sroa.5.i, i64 0, i64 0
- call void @llvm.lifetime.start.p0i8(i64 6, i8* %.sroa.5.0..sroa_idx16.i)
- call void @llvm.memset.p0i8.i64(i8* align 2 %.sroa.4.0..sroa_idx21.i, i8 0, i64 20, i1 false)
- call void @llvm.memset.p0i8.i64(i8* align 8 %.sroa.5.0..sroa_idx16.i, i8 0, i64 6, i1 false)
- %i5 = load i32, i32* @c, align 4, !tbaa !4
+ %i4 = call ptr @m()
+ call void @llvm.lifetime.start.p0(i64 20, ptr %.sroa.4.i)
+ call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.memset.p0.i64(ptr align 2 %.sroa.4.i, i8 0, i64 20, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 8 %.sroa.5.i, i8 0, i64 6, i1 false)
+ %i5 = load i32, ptr @c, align 4, !tbaa !4
%i6 = trunc i32 %i5 to i16
- call void @llvm.lifetime.end.p0i8(i64 20, i8* %.sroa.4.0..sroa_idx21.i)
- call void @llvm.lifetime.end.p0i8(i64 6, i8* %.sroa.5.0..sroa_idx16.i)
- call void @llvm.lifetime.start.p0i8(i64 6, i8* %i)
- call void @llvm.memset.p0i8.i64(i8* align 1 %i, i8 3, i64 6, i1 false)
+ call void @llvm.lifetime.end.p0(i64 20, ptr %.sroa.4.i)
+ call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.memset.p0.i64(ptr align 1 %.sroa.5.i, i8 3, i64 6, i1 false)
br label %bb7
bb7: ; preds = %bb7, %bb3
br i1 %i10, label %bb7, label %l.exit
l.exit: ; preds = %bb7
- call void @llvm.lifetime.end.p0i8(i64 6, i8* %i)
+ call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
br label %bb11
bb11: ; preds = %l.exit, %bb
; Inline-cost "free extension" tests, AArch64 target.  Each outer function
; exists only to give the analyzer a call site to cost.
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"
-define i32 @outer1(i32* %ptr, i32 %i) {
-  %C = call i32 @inner1(i32* %ptr, i32 %i)
+define i32 @outer1(ptr %ptr, i32 %i) {
+  %C = call i32 @inner1(ptr %ptr, i32 %i)
  ret i32 %C
}
; CHECK: Analyzing call of inner1
; CHECK: NumInstructionsSimplified: 3
; CHECK: NumInstructions: 4
; Three of the four instructions are reported simplified (see CHECK lines);
; the sext feeding the gep index is expected to be treated as free.
-define i32 @inner1(i32* %ptr, i32 %i) {
+define i32 @inner1(ptr %ptr, i32 %i) {
  %E = sext i32 %i to i64
-  %G = getelementptr inbounds i32, i32* %ptr, i64 %E
-  %L = load i32, i32* %G
+  %G = getelementptr inbounds i32, ptr %ptr, i64 %E
+  %L = load i32, ptr %G
  ret i32 %L
}
-define i32 @outer2(i32* %ptr, i32 %i) {
- %C = call i32 @inner2(i32* %ptr, i32 %i)
+define i32 @outer2(ptr %ptr, i32 %i) {
+ %C = call i32 @inner2(ptr %ptr, i32 %i)
ret i32 %C
}
; CHECK: Analyzing call of inner2
; CHECK: NumInstructionsSimplified: 3
; CHECK: NumInstructions: 4
-define i32 @inner2(i32* %ptr, i32 %i) {
+define i32 @inner2(ptr %ptr, i32 %i) {
%E = zext i32 %i to i64
- %G = getelementptr inbounds i32, i32* %ptr, i64 %E
- %L = load i32, i32* %G
+ %G = getelementptr inbounds i32, ptr %ptr, i64 %E
+ %L = load i32, ptr %G
ret i32 %L
}
-define i32 @outer3(i32* %ptr, i16 %i) {
- %C = call i32 @inner3(i32* %ptr, i16 %i)
+define i32 @outer3(ptr %ptr, i16 %i) {
+ %C = call i32 @inner3(ptr %ptr, i16 %i)
ret i32 %C
}
; CHECK: Analyzing call of inner3
; CHECK: NumInstructionsSimplified: 3
; CHECK: NumInstructions: 4
-define i32 @inner3(i32* %ptr, i16 %i) {
+define i32 @inner3(ptr %ptr, i16 %i) {
%E = zext i16 %i to i64
- %G = getelementptr inbounds i32, i32* %ptr, i64 %E
- %L = load i32, i32* %G
+ %G = getelementptr inbounds i32, ptr %ptr, i64 %E
+ %L = load i32, ptr %G
ret i32 %L
}
-define i16 @outer4(i8* %ptr) {
- %C = call i16 @inner4(i8* %ptr)
+define i16 @outer4(ptr %ptr) {
+ %C = call i16 @inner4(ptr %ptr)
ret i16 %C
}
; CHECK: Analyzing call of inner4
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i16 @inner4(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i16 @inner4(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i16
ret i16 %E
}
-define i16 @outer5(i8* %ptr) {
- %C = call i16 @inner5(i8* %ptr)
+define i16 @outer5(ptr %ptr) {
+ %C = call i16 @inner5(ptr %ptr)
ret i16 %C
}
; CHECK: Analyzing call of inner5
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i16 @inner5(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i16 @inner5(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i16
ret i16 %E
}
-define i32 @outer6(i8* %ptr) {
- %C = call i32 @inner6(i8* %ptr)
+define i32 @outer6(ptr %ptr) {
+ %C = call i32 @inner6(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner6
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner6(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i32 @inner6(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i32
ret i32 %E
}
-define i32 @outer7(i8* %ptr) {
- %C = call i32 @inner7(i8* %ptr)
+define i32 @outer7(ptr %ptr) {
+ %C = call i32 @inner7(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner7
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner7(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i32 @inner7(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i32
ret i32 %E
}
-define i32 @outer8(i16* %ptr) {
- %C = call i32 @inner8(i16* %ptr)
+define i32 @outer8(ptr %ptr) {
+ %C = call i32 @inner8(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner8
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner8(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner8(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i32
ret i32 %E
}
-define i32 @outer9(i16* %ptr) {
- %C = call i32 @inner9(i16* %ptr)
+define i32 @outer9(ptr %ptr) {
+ %C = call i32 @inner9(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner9
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner9(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner9(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i32
ret i32 %E
}
-define i64 @outer10(i8* %ptr) {
- %C = call i64 @inner10(i8* %ptr)
+define i64 @outer10(ptr %ptr) {
+ %C = call i64 @inner10(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner10
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner10(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i64 @inner10(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i64
ret i64 %E
}
-define i64 @outer11(i8* %ptr) {
- %C = call i64 @inner11(i8* %ptr)
+define i64 @outer11(ptr %ptr) {
+ %C = call i64 @inner11(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner11
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner11(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i64 @inner11(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i64
ret i64 %E
}
-define i64 @outer12(i16* %ptr) {
- %C = call i64 @inner12(i16* %ptr)
+define i64 @outer12(ptr %ptr) {
+ %C = call i64 @inner12(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner12
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner12(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner12(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i64
ret i64 %E
}
-define i64 @outer13(i16* %ptr) {
- %C = call i64 @inner13(i16* %ptr)
+define i64 @outer13(ptr %ptr) {
+ %C = call i64 @inner13(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner13
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner13(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner13(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i64
ret i64 %E
}
-define i64 @outer14(i32* %ptr) {
- %C = call i64 @inner14(i32* %ptr)
+define i64 @outer14(ptr %ptr) {
+ %C = call i64 @inner14(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner14
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner14(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner14(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = zext i32 %L to i64
ret i64 %E
}
-define i64 @outer15(i32* %ptr) {
- %C = call i64 @inner15(i32* %ptr)
+define i64 @outer15(ptr %ptr) {
+ %C = call i64 @inner15(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner15
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner15(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner15(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = sext i32 %L to i64
ret i64 %E
}
; Inline-cost "free extension" tests, PowerPC64 target.
; NOTE(review): the datalayout is big-endian ("E-m:e...") while the triple is
; little-endian powerpc64le -- confirm this mismatch is intentional.
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64le-ibm-linux-gnu"
-define i16 @outer1(i8* %ptr) {
-  %C = call i16 @inner1(i8* %ptr)
+define i16 @outer1(ptr %ptr) {
+  %C = call i16 @inner1(ptr %ptr)
  ret i16 %C
}
; CHECK: Analyzing call of inner1
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
; Two of three instructions reported simplified: the zext of the loaded i8 is
; expected to be free on this target.
-define i16 @inner1(i8* %ptr) {
-  %L = load i8, i8* %ptr
+define i16 @inner1(ptr %ptr) {
+  %L = load i8, ptr %ptr
  %E = zext i8 %L to i16
  ret i16 %E
}
-define i32 @outer2(i8* %ptr) {
- %C = call i32 @inner2(i8* %ptr)
+define i32 @outer2(ptr %ptr) {
+ %C = call i32 @inner2(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner2
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner2(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i32 @inner2(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i32
ret i32 %E
}
-define i32 @outer3(i16* %ptr) {
- %C = call i32 @inner3(i16* %ptr)
+define i32 @outer3(ptr %ptr) {
+ %C = call i32 @inner3(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner3
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner3(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner3(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i32
ret i32 %E
}
-define i32 @outer4(i16* %ptr) {
- %C = call i32 @inner4(i16* %ptr)
+define i32 @outer4(ptr %ptr) {
+ %C = call i32 @inner4(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner4
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner4(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner4(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i32
ret i32 %E
}
-define i64 @outer5(i8* %ptr) {
- %C = call i64 @inner5(i8* %ptr)
+define i64 @outer5(ptr %ptr) {
+ %C = call i64 @inner5(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner5
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner5(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i64 @inner5(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i64
ret i64 %E
}
-define i64 @outer6(i16* %ptr) {
- %C = call i64 @inner6(i16* %ptr)
+define i64 @outer6(ptr %ptr) {
+ %C = call i64 @inner6(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner6
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner6(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner6(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i64
ret i64 %E
}
-define i64 @outer7(i16* %ptr) {
- %C = call i64 @inner7(i16* %ptr)
+define i64 @outer7(ptr %ptr) {
+ %C = call i64 @inner7(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner7
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner7(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner7(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i64
ret i64 %E
}
-define i64 @outer8(i32* %ptr) {
- %C = call i64 @inner8(i32* %ptr)
+define i64 @outer8(ptr %ptr) {
+ %C = call i64 @inner8(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner8
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner8(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner8(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = zext i32 %L to i64
ret i64 %E
}
-define i64 @outer9(i32* %ptr) {
- %C = call i64 @inner9(i32* %ptr)
+define i64 @outer9(ptr %ptr) {
+ %C = call i64 @inner9(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner9
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner9(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner9(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = sext i32 %L to i64
ret i64 %E
}
; Inline-cost "free extension" tests, x86-64 target.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
-define i32 @outer1(i32* %ptr, i32 %i) {
-  %C = call i32 @inner1(i32* %ptr, i32 %i)
+define i32 @outer1(ptr %ptr, i32 %i) {
+  %C = call i32 @inner1(ptr %ptr, i32 %i)
  ret i32 %C
}
; CHECK: Analyzing call of inner1
; CHECK: NumInstructionsSimplified: 3
; CHECK: NumInstructions: 4
; Three of four instructions are reported simplified (see CHECK lines); the
; zext feeding the gep index is expected to fold into addressing.
-define i32 @inner1(i32* %ptr, i32 %i) {
+define i32 @inner1(ptr %ptr, i32 %i) {
  %E = zext i32 %i to i64
-  %G = getelementptr inbounds i32, i32* %ptr, i64 %E
-  %L = load i32, i32* %G
+  %G = getelementptr inbounds i32, ptr %ptr, i64 %E
+  %L = load i32, ptr %G
  ret i32 %L
}
-define i16 @outer2(i8* %ptr) {
- %C = call i16 @inner2(i8* %ptr)
+define i16 @outer2(ptr %ptr) {
+ %C = call i16 @inner2(ptr %ptr)
ret i16 %C
}
; CHECK: Analyzing call of inner2
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i16 @inner2(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i16 @inner2(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i16
ret i16 %E
}
-define i16 @outer3(i8* %ptr) {
- %C = call i16 @inner3(i8* %ptr)
+define i16 @outer3(ptr %ptr) {
+ %C = call i16 @inner3(ptr %ptr)
ret i16 %C
}
; CHECK: Analyzing call of inner3
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i16 @inner3(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i16 @inner3(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i16
ret i16 %E
}
-define i32 @outer4(i8* %ptr) {
- %C = call i32 @inner4(i8* %ptr)
+define i32 @outer4(ptr %ptr) {
+ %C = call i32 @inner4(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner4
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner4(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i32 @inner4(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i32
ret i32 %E
}
-define i32 @outer5(i8* %ptr) {
- %C = call i32 @inner5(i8* %ptr)
+define i32 @outer5(ptr %ptr) {
+ %C = call i32 @inner5(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner5
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner5(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i32 @inner5(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i32
ret i32 %E
}
-define i32 @outer6(i16* %ptr) {
- %C = call i32 @inner6(i16* %ptr)
+define i32 @outer6(ptr %ptr) {
+ %C = call i32 @inner6(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner6
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner6(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner6(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i32
ret i32 %E
}
-define i32 @outer7(i16* %ptr) {
- %C = call i32 @inner7(i16* %ptr)
+define i32 @outer7(ptr %ptr) {
+ %C = call i32 @inner7(ptr %ptr)
ret i32 %C
}
; CHECK: Analyzing call of inner7
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i32 @inner7(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i32 @inner7(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i32
ret i32 %E
}
-define i64 @outer8(i8* %ptr) {
- %C = call i64 @inner8(i8* %ptr)
+define i64 @outer8(ptr %ptr) {
+ %C = call i64 @inner8(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner8
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner8(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i64 @inner8(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = zext i8 %L to i64
ret i64 %E
}
-define i64 @outer9(i8* %ptr) {
- %C = call i64 @inner9(i8* %ptr)
+define i64 @outer9(ptr %ptr) {
+ %C = call i64 @inner9(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner9
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner9(i8* %ptr) {
- %L = load i8, i8* %ptr
+define i64 @inner9(ptr %ptr) {
+ %L = load i8, ptr %ptr
%E = sext i8 %L to i64
ret i64 %E
}
-define i64 @outer10(i16* %ptr) {
- %C = call i64 @inner10(i16* %ptr)
+define i64 @outer10(ptr %ptr) {
+ %C = call i64 @inner10(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner10
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner10(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner10(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = zext i16 %L to i64
ret i64 %E
}
-define i64 @outer11(i16* %ptr) {
- %C = call i64 @inner11(i16* %ptr)
+define i64 @outer11(ptr %ptr) {
+ %C = call i64 @inner11(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner11
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner11(i16* %ptr) {
- %L = load i16, i16* %ptr
+define i64 @inner11(ptr %ptr) {
+ %L = load i16, ptr %ptr
%E = sext i16 %L to i64
ret i64 %E
}
-define i64 @outer12(i32* %ptr) {
- %C = call i64 @inner12(i32* %ptr)
+define i64 @outer12(ptr %ptr) {
+ %C = call i64 @inner12(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner12
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner12(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner12(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = zext i32 %L to i64
ret i64 %E
}
-define i64 @outer13(i32* %ptr) {
- %C = call i64 @inner13(i32* %ptr)
+define i64 @outer13(ptr %ptr) {
+ %C = call i64 @inner13(ptr %ptr)
ret i64 %C
}
; CHECK: Analyzing call of inner13
; CHECK: NumInstructionsSimplified: 2
; CHECK: NumInstructions: 3
-define i64 @inner13(i32* %ptr) {
- %L = load i32, i32* %ptr
+define i64 @inner13(ptr %ptr) {
+ %L = load i32, ptr %ptr
%E = sext i32 %L to i64
ret i64 %E
}
; 32-bit pointer layout for the inline-cost / SROA-argument tests below.
target datalayout = "p:32:32"
-declare void @llvm.lifetime.start.p0i8(i64 %size, i8* nocapture %ptr)
+declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
@glbl = external global i32
; CHECK-LABEL: @outer1(
; CHECK-NOT: call void @inner1
%ptr = alloca i32
- call void @inner1(i32* %ptr)
+ call void @inner1(ptr %ptr)
ret void
}
; inner1: every use of %ptr here (load, store, constant-index GEP, select
; with a constant-false condition, lifetime marker) is simple; the caller
; passes an alloca, and the CHECK-NOT above expects the call to be inlined.
; Note the zero-index GEP %C and the bitcast %E exist only in the removed
; typed-pointer lines — both are redundant with opaque pointers.
-define void @inner1(i32 *%ptr) {
- %A = load i32, i32* %ptr
- store i32 0, i32* %ptr
- %C = getelementptr inbounds i32, i32* %ptr, i32 0
- %D = getelementptr inbounds i32, i32* %ptr, i32 1
- %E = bitcast i32* %ptr to i8*
- %F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
+define void @inner1(ptr %ptr) {
+ %A = load i32, ptr %ptr
+ store i32 0, ptr %ptr
+ %D = getelementptr inbounds i32, ptr %ptr, i32 1
+ %F = select i1 false, ptr %ptr, ptr @glbl
+ call void @llvm.lifetime.start.p0(i64 0, ptr %ptr)
call void @extern()
ret void
}
; CHECK-LABEL: @outer2(
; CHECK: call void @inner2
%ptr = alloca i32
- call void @inner2(i32* %ptr)
+ call void @inner2(ptr %ptr)
ret void
}
; %D poisons this call, scalar-repl can't handle that instruction.
; (%D is the GEP with the variable index %A; the CHECK above expects the
; call in outer2 to remain un-inlined because of it.)
-define void @inner2(i32 *%ptr) {
- %A = load i32, i32* %ptr
- store i32 0, i32* %ptr
- %C = getelementptr inbounds i32, i32* %ptr, i32 0
- %D = getelementptr inbounds i32, i32* %ptr, i32 %A
- %E = bitcast i32* %ptr to i8*
- %F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
+define void @inner2(ptr %ptr) {
+ %A = load i32, ptr %ptr
+ store i32 0, ptr %ptr
+ %D = getelementptr inbounds i32, ptr %ptr, i32 %A
+ %F = select i1 false, ptr %ptr, ptr @glbl
+ call void @llvm.lifetime.start.p0(i64 0, ptr %ptr)
call void @extern()
ret void
}
; CHECK-LABEL: @outer3(
; CHECK-NOT: call void @inner3
%ptr = alloca i32
- call void @inner3(i32* %ptr, i1 undef)
+ call void @inner3(ptr %ptr, i1 undef)
ret void
}
-define void @inner3(i32 *%ptr, i1 %x) {
- %A = icmp eq i32* %ptr, null
+define void @inner3(ptr %ptr, i1 %x) {
+ %A = icmp eq ptr %ptr, null
%B = and i1 %x, %A
call void @extern()
br i1 %A, label %bb.true, label %bb.false
bb.true:
; This block mustn't be counted in the inline cost.
- %t1 = load i32, i32* %ptr
+ %t1 = load i32, ptr %ptr
%t2 = add i32 %t1, 1
%t3 = add i32 %t2, 1
%t4 = add i32 %t3, 1
; CHECK-LABEL: @outer4(
; CHECK-NOT: call void @inner4
%ptr = alloca i32
- call void @inner4(i32* %ptr, i32 %A)
+ call void @inner4(ptr %ptr, i32 %A)
ret void
}
; %B poisons this call, scalar-repl can't handle that instruction. However, we
; still want to detect that the icmp and branch *can* be handled.
-define void @inner4(i32 *%ptr, i32 %A) {
- %B = getelementptr inbounds i32, i32* %ptr, i32 %A
- %C = icmp eq i32* %ptr, null
+define void @inner4(ptr %ptr, i32 %A) {
+ %B = getelementptr inbounds i32, ptr %ptr, i32 %A
+ %C = icmp eq ptr %ptr, null
call void @extern()
br i1 %C, label %bb.true, label %bb.false
bb.true:
; This block mustn't be counted in the inline cost.
- %t1 = load i32, i32* %ptr
+ %t1 = load i32, ptr %ptr
%t2 = add i32 %t1, 1
%t3 = add i32 %t2, 1
%t4 = add i32 %t3, 1
; CHECK-LABEL: @outer5(
; CHECK-NOT: call void @inner5
%ptr = alloca i32
- call void @inner5(i1 false, i32* %ptr)
+ call void @inner5(i1 false, ptr %ptr)
ret void
}
; %D poisons this call, scalar-repl can't handle that instruction. However, if
; the flag is set appropriately, the poisoning instruction is inside of dead
; code, and so shouldn't be counted.
-define void @inner5(i1 %flag, i32 *%ptr) {
- %A = load i32, i32* %ptr
- store i32 0, i32* %ptr
+define void @inner5(i1 %flag, ptr %ptr) {
+ %A = load i32, ptr %ptr
+ store i32 0, ptr %ptr
call void @extern()
- %C = getelementptr inbounds i32, i32* %ptr, i32 0
br i1 %flag, label %if.then, label %exit
if.then:
- %D = getelementptr inbounds i32, i32* %ptr, i32 %A
- %E = bitcast i32* %ptr to i8*
- %F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
+ %D = getelementptr inbounds i32, ptr %ptr, i32 %A
+ %F = select i1 false, ptr %ptr, ptr @glbl
+ call void @llvm.lifetime.start.p0(i64 0, ptr %ptr)
ret void
exit:
declare void @llvm.experimental.guard(i1, ...)
; callee: alwaysinline function that loads a volatile condition and issues an
; @llvm.experimental.guard on it with its own "deopt"(i32 1) state; callers
; below verify that inlining prepends their deopt state to this bundle.
-define i8 @callee(i1* %c_ptr) alwaysinline {
- %c = load volatile i1, i1* %c_ptr
+define i8 @callee(ptr %c_ptr) alwaysinline {
+ %c = load volatile i1, ptr %c_ptr
call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1) [ "deopt"(i32 1) ]
ret i8 5
}
; caller_0: plain call of @callee with "deopt"(i32 2). The CHECKs expect the
; inlined guard's bundle to become "deopt"(i32 2, i32 1) — caller state first —
; and the callee's constant return (5) to be stored directly.
-define void @caller_0(i1* %c, i8* %ptr) {
+define void @caller_0(ptr %c, ptr %ptr) {
; CHECK-LABEL: @caller_0(
entry:
-; CHECK: [[COND:%[^ ]+]] = load volatile i1, i1* %c
+; CHECK: [[COND:%[^ ]+]] = load volatile i1, ptr %c
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND]], i32 1) [ "deopt"(i32 2, i32 1) ]
-; CHECK-NEXT: store i8 5, i8* %ptr
+; CHECK-NEXT: store i8 5, ptr %ptr
- %v = call i8 @callee(i1* %c) [ "deopt"(i32 2) ]
- store i8 %v, i8* %ptr
+ %v = call i8 @callee(ptr %c) [ "deopt"(i32 2) ]
+ store i8 %v, ptr %ptr
ret void
}
; caller_1: invoke variant of caller_0 — @callee is invoked with
; "deopt"(i32 3); after inlining, the guard's bundle must read
; "deopt"(i32 3, i32 1) and control must branch to %normal.
-define i32 @caller_1(i1* %c, i8* %ptr) personality i8 3 {
+define i32 @caller_1(ptr %c, ptr %ptr) personality i8 3 {
; CHECK-LABEL: @caller_1(
-; CHECK: [[COND:%[^ ]+]] = load volatile i1, i1* %c
+; CHECK: [[COND:%[^ ]+]] = load volatile i1, ptr %c
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND]], i32 1) [ "deopt"(i32 3, i32 1) ]
; CHECK-NEXT: br label %normal
entry:
- %v = invoke i8 @callee(i1* %c) [ "deopt"(i32 3) ] to label %normal
+ %v = invoke i8 @callee(ptr %c) [ "deopt"(i32 3) ] to label %normal
unwind label %unwind
unwind:
ret i32 43
normal:
- store i8 %v, i8* %ptr
+ store i8 %v, ptr %ptr
ret i32 42
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
; ALL-LABEL: @small_stride(
-define void @small_stride(double* nocapture %a, double* nocapture readonly %b) {
+define void @small_stride(ptr nocapture %a, ptr nocapture readonly %b) {
entry:
br label %for.body
; ALL: for.body:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
; ALL-NOT: call void @llvm.prefetch
- %0 = load double, double* %arrayidx, align 8
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond, label %for.end, label %for.body
}
; ALL-LABEL: @large_stride(
-define void @large_stride(double* nocapture %a, double* nocapture readonly %b) {
+define void @large_stride(ptr nocapture %a, ptr nocapture readonly %b) {
entry:
br label %for.body
; ALL: for.body:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
; LARGE_PREFETCH: call void @llvm.prefetch
; NO_LARGE_PREFETCH-NOT: call void @llvm.prefetch
- %0 = load double, double* %arrayidx, align 8
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 150
%exitcond = icmp eq i64 %indvars.iv.next, 160000
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
; ALL-LABEL: @small_stride(
-define void @small_stride(double* nocapture %a, double* nocapture readonly %b) {
+define void @small_stride(ptr nocapture %a, ptr nocapture readonly %b) {
entry:
br label %for.body
; ALL: for.body:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
; ALL-NOT: call void @llvm.prefetch
- %0 = load double, double* %arrayidx, align 8
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond, label %for.end, label %for.body
}
; ALL-LABEL: @large_stride(
-define void @large_stride(double* nocapture %a, double* nocapture readonly %b) {
+define void @large_stride(ptr nocapture %a, ptr nocapture readonly %b) {
entry:
br label %for.body
; ALL: for.body:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
; LARGE_PREFETCH: call void @llvm.prefetch
; NO_LARGE_PREFETCH-NOT: call void @llvm.prefetch
- %0 = load double, double* %arrayidx, align 8
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 300
%exitcond = icmp eq i64 %indvars.iv.next, 160000
br i1 %exitcond, label %for.end, label %for.body
%struct.MyStruct = type { i32, [2044 x i8] }
-@my_struct = common global %struct.MyStruct* null, align 8
+@my_struct = common global ptr null, align 8
-define i32 @f(%struct.MyStruct* nocapture readnone %p, i32 %N) !dbg !6 !prof !21 {
+define i32 @f(ptr nocapture readnone %p, i32 %N) !dbg !6 !prof !21 {
entry:
%cmp6 = icmp sgt i32 %N, 0, !dbg !8
br i1 %cmp6, label %for.body.lr.ph, label %for.cond.cleanup, !dbg !9, !prof !22
for.body.lr.ph: ; preds = %entry
- %0 = load %struct.MyStruct*, %struct.MyStruct** @my_struct, align 8, !dbg !10, !tbaa !11
+ %0 = load ptr, ptr @my_struct, align 8, !dbg !10, !tbaa !11
br label %for.body, !dbg !9
for.cond.cleanup: ; preds = %for.body, %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%total.07 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %field = getelementptr inbounds %struct.MyStruct, %struct.MyStruct* %0, i64 %indvars.iv, i32 0, !dbg !16
- %1 = load i32, i32* %field, align 4, !dbg !16, !tbaa !17
+ %field = getelementptr inbounds %struct.MyStruct, ptr %0, i64 %indvars.iv, i32 0, !dbg !16
+ %1 = load i32, ptr %field, align 4, !dbg !16, !tbaa !17
%add = add nsw i32 %1, %total.07, !dbg !20
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !9
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !9
%struct.MyStruct = type { i32, [2044 x i8] }
-@my_struct = common global %struct.MyStruct* null, align 8
+@my_struct = common global ptr null, align 8
-define i32 @f(%struct.MyStruct* nocapture readnone %p, i32 %N) !dbg !6 {
+define i32 @f(ptr nocapture readnone %p, i32 %N) !dbg !6 {
entry:
%cmp6 = icmp sgt i32 %N, 0, !dbg !8
br i1 %cmp6, label %for.body.lr.ph, label %for.cond.cleanup, !dbg !9
for.body.lr.ph: ; preds = %entry
- %0 = load %struct.MyStruct*, %struct.MyStruct** @my_struct, align 8, !dbg !10, !tbaa !11
+ %0 = load ptr, ptr @my_struct, align 8, !dbg !10, !tbaa !11
br label %for.body, !dbg !9
for.cond.cleanup: ; preds = %for.body, %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%total.07 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %field = getelementptr inbounds %struct.MyStruct, %struct.MyStruct* %0, i64 %indvars.iv, i32 0, !dbg !16
- %1 = load i32, i32* %field, align 4, !dbg !16, !tbaa !17
+ %field = getelementptr inbounds %struct.MyStruct, ptr %0, i64 %indvars.iv, i32 0, !dbg !16
+ %1 = load i32, ptr %field, align 4, !dbg !16, !tbaa !17
%add = add nsw i32 %1, %total.07, !dbg !20
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !9
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !9
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"
-%struct._Chv = type { i32, i32, i32, i32, i32, i32, i32*, i32*, double*, %struct._DV, %struct._Chv* }
-%struct._DV = type { i32, i32, i32, double* }
+%struct._Chv = type { i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, %struct._DV, ptr }
+%struct._DV = type { i32, i32, i32, ptr }
-declare double* @f_entries() local_unnamed_addr
+declare ptr @f_entries() local_unnamed_addr
-define i32 @f(%struct._Chv* %chv, i32 %npivot, i32* %pivotsizes, i32* %sizes) local_unnamed_addr {
+define i32 @f(ptr %chv, i32 %npivot, ptr %pivotsizes, ptr %sizes) local_unnamed_addr {
if.end:
switch i32 undef, label %sw.default [
i32 1, label %sw.epilog
br label %if.end12
if.end12: ; preds = %sw.epilog
- %nD13 = getelementptr inbounds %struct._Chv, %struct._Chv* %chv, i64 0, i32 1
- %0 = load i32, i32* %nD13, align 4
- %nU15 = getelementptr inbounds %struct._Chv, %struct._Chv* %chv, i64 0, i32 3
- %1 = load i32, i32* %nU15, align 4
+ %nD13 = getelementptr inbounds %struct._Chv, ptr %chv, i64 0, i32 1
+ %0 = load i32, ptr %nD13, align 4
+ %nU15 = getelementptr inbounds %struct._Chv, ptr %chv, i64 0, i32 3
+ %1 = load i32, ptr %nU15, align 4
%add17 = add i32 %1, %0
- %call18 = call double* @f_entries()
+ %call18 = call ptr @f_entries()
switch i32 undef, label %sw.epilog2454 [
i32 3, label %sw.bb213
]
br label %if.then220
if.then220: ; preds = %sw.bb214
- %type230 = getelementptr inbounds %struct._Chv, %struct._Chv* %chv, i64 0, i32 4
- %2 = load i32, i32* %type230, align 8
+ %type230 = getelementptr inbounds %struct._Chv, ptr %chv, i64 0, i32 4
+ %2 = load i32, ptr %type230, align 8
br label %if.else319
if.else319: ; preds = %if.then220
br label %for.body374
for.body374: ; preds = %for.body374.lr.ph
- %arrayidx376 = getelementptr inbounds i32, i32* %pivotsizes, i64 0
- %3 = load i32, i32* %arrayidx376, align 4
+ %3 = load i32, ptr %pivotsizes, align 4
%add377 = add i32 %3, 0
br label %for.body381.lr.ph
%kk224.3.us = add nsw i32 %kk224.3.in4330.us, 1
%mul389.us = shl nsw i32 %kk224.3.us, 1
%idxprom390.us = sext i32 %mul389.us to i64
- %arrayidx391.us = getelementptr inbounds double, double* %call18, i64 %idxprom390.us
- %6 = load double, double* %arrayidx391.us, align 8
+ %arrayidx391.us = getelementptr inbounds double, ptr %call18, i64 %idxprom390.us
+ %6 = load double, ptr %arrayidx391.us, align 8
%call396.us = call double @Zabs(double %6)
br label %for.inc418.us
declare double @llvm.fabs.f64(double)
declare dso_local double @Zabs(double) local_unnamed_addr
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
; RUN: opt -mcpu=a2 -passes=loop-data-prefetch -mtriple=powerpc64le-unknown-linux -enable-ppc-prefetching -S < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
-define void @foo(double* nocapture %a, double* nocapture readonly %b) {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %b) {
entry:
br label %for.body
; CHECK: for.body:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
; CHECK: call void @llvm.prefetch
- %0 = load double, double* %arrayidx, align 8
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond, label %for.end, label %for.body
target triple = "x86_64-apple-macosx10.10.0"
; CHECK-LABEL: @f(
-define void @f(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @f(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
; It is OK to distribute with a convergent operation, since in each
; new loop the convergent operation has the same control dependency.
; CHECK-LABEL: @f_with_convergent(
-define void @f_with_convergent(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @f_with_convergent(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%convergentD = call i32 @llvm.convergent(i32 %loadD)
%mulC = mul i32 %convergentD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %a1, i32* %a2,
- i32* %b,
- i32* %c1, i32* %c2,
- i32* %d,
- i32* %e) {
+define void @f(ptr %a1, ptr %a2,
+ ptr %b,
+ ptr %c1, ptr %c2,
+ ptr %d,
+ ptr %e) {
entry:
- %cond = icmp eq i32* %e, null
+ %cond = icmp eq ptr %e, null
br i1 %cond, label %one, label %two
one:
br label %join
; %0 = bitcast i32* %c to i8* <--- old, invalidated
; %1 = bitcast i32* %a to i8*
- %a = phi i32* [%a1, %one], [%a2, %two]
- %c = phi i32* [%c1, %one], [%c2, %two]
+ %a = phi ptr [%a1, %one], [%a2, %two]
+ %c = phi ptr [%c1, %one], [%c2, %two]
br label %for.body
; CHECK: join
-; CHECK: {{%[0-9a-z]+}} = bitcast i32* %a to i8*
-; CHECK: {{%[0-9a-z]+}} = bitcast i32* %c to i8*
-; CHECK-NOT: bitcast i32* %c to i8*
-; CHECK-NOT: bitcast i32* %a to i8*
+; CHECK-NOT: bitcast ptr %c to ptr
+; CHECK-NOT: bitcast ptr %a to ptr
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %join ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
; op. LoopAccessAnalysis says that runtime checks are necessary, but
; none are cross partition, so none are truly needed.
-define void @f(i32* %a, i32* %b, i32* noalias %c, i32* noalias %d, i32* noalias %e) #1 {
+define void @f(ptr %a, ptr %b, ptr noalias %c, ptr noalias %d, ptr noalias %e) #1 {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[ENTRY_SPLIT_LDIST1:%.*]]
; CHECK-NEXT: br label [[FOR_BODY_LDIST1:%.*]]
; CHECK: for.body.ldist1:
; CHECK-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[ENTRY_SPLIT_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
-; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IND_LDIST1]]
-; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4
-; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IND_LDIST1]]
-; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND_LDIST1]]
+; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4
+; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND_LDIST1]]
+; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
-; CHECK-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
+; CHECK-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
; CHECK-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], 20
; CHECK-NEXT: br i1 [[EXITCOND_LDIST1]], label [[ENTRY_SPLIT:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK: entry.split:
; CHECK: for.body:
; CHECK-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY_SPLIT]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
-; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[IND]]
-; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
+; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND]]
+; CHECK-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
; CHECK-NEXT: [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]])
-; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[IND]]
-; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
+; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, ptr [[E:%.*]], i64 [[IND]]
+; CHECK-NEXT: [[LOADE:%.*]] = load i32, ptr [[ARRAYIDXE]], align 4
; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]]
-; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IND]]
-; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND]]
+; CHECK-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
%convergentD = call i32 @llvm.convergent(i32 %loadD)
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %convergentD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define void @f(i32* %a,
- i32* %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @f(ptr %a,
+ ptr %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
; NO_HOTNESS: remark: /tmp/t.c:3:3: loop not distributed: use -Rpass-analysis=loop-distribute for more info{{$}}
; NO_HOTNESS: remark: /tmp/t.c:3:3: loop not distributed: memory operations are safe for vectorization{{$}}
-define void @forced(i8* %A, i8* %B, i8* %C, i32 %N) !dbg !7 !prof !22 {
+define void @forced(ptr %A, ptr %B, ptr %C, i32 %N) !dbg !7 !prof !22 {
entry:
%cmp12 = icmp sgt i32 %N, 0, !dbg !9
br i1 %cmp12, label %ph, label %for.cond.cleanup, !dbg !10, !prof !23
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %ph ]
- %arrayidx = getelementptr inbounds i8, i8* %B, i64 %indvars.iv, !dbg !12
- %0 = load i8, i8* %arrayidx, align 1, !dbg !12, !tbaa !13
- %arrayidx2 = getelementptr inbounds i8, i8* %C, i64 %indvars.iv, !dbg !16
- %1 = load i8, i8* %arrayidx2, align 1, !dbg !16, !tbaa !13
+ %arrayidx = getelementptr inbounds i8, ptr %B, i64 %indvars.iv, !dbg !12
+ %0 = load i8, ptr %arrayidx, align 1, !dbg !12, !tbaa !13
+ %arrayidx2 = getelementptr inbounds i8, ptr %C, i64 %indvars.iv, !dbg !16
+ %1 = load i8, ptr %arrayidx2, align 1, !dbg !16, !tbaa !13
%mul = mul i8 %1, %0, !dbg !17
- %arrayidx6 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv, !dbg !18
- store i8 %mul, i8* %arrayidx6, align 1, !dbg !19, !tbaa !13
+ %arrayidx6 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv, !dbg !18
+ store i8 %mul, ptr %arrayidx6, align 1, !dbg !19, !tbaa !13
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !10
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !10
%exitcond = icmp eq i32 %lftr.wideiv, %N, !dbg !10
; ALWAYS: remark: /tmp/t.c:3:3: loop not distributed: memory operations are safe for vectorization
; ALWAYS: warning: /tmp/t.c:3:3: loop not distributed: failed explicitly specified loop distribution
-define void @forced(i8* %A, i8* %B, i8* %C, i32 %N) !dbg !7 {
+define void @forced(ptr %A, ptr %B, ptr %C, i32 %N) !dbg !7 {
entry:
%cmp12 = icmp sgt i32 %N, 0, !dbg !9
br i1 %cmp12, label %ph, label %for.cond.cleanup, !dbg !10
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %ph ]
- %arrayidx = getelementptr inbounds i8, i8* %B, i64 %indvars.iv, !dbg !12
- %0 = load i8, i8* %arrayidx, align 1, !dbg !12, !tbaa !13
- %arrayidx2 = getelementptr inbounds i8, i8* %C, i64 %indvars.iv, !dbg !16
- %1 = load i8, i8* %arrayidx2, align 1, !dbg !16, !tbaa !13
+ %arrayidx = getelementptr inbounds i8, ptr %B, i64 %indvars.iv, !dbg !12
+ %0 = load i8, ptr %arrayidx, align 1, !dbg !12, !tbaa !13
+ %arrayidx2 = getelementptr inbounds i8, ptr %C, i64 %indvars.iv, !dbg !16
+ %1 = load i8, ptr %arrayidx2, align 1, !dbg !16, !tbaa !13
%mul = mul i8 %1, %0, !dbg !17
- %arrayidx6 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv, !dbg !18
- store i8 %mul, i8* %arrayidx6, align 1, !dbg !19, !tbaa !13
+ %arrayidx6 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv, !dbg !18
+ store i8 %mul, ptr %arrayidx6, align 1, !dbg !19, !tbaa !13
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !10
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !10
%exitcond = icmp eq i32 %lftr.wideiv, %N, !dbg !10
; ANALYSIS_REMARKS: remark: /tmp/t.c:9:3: loop not distributed: memory operations are safe for vectorization
; ALWAYS-NOT: warning: /tmp/t.c:9:3: loop not distributed: failed explicitly specified loop distribution
-define void @not_forced(i8* %A, i8* %B, i8* %C, i32 %N) !dbg !22 {
+define void @not_forced(ptr %A, ptr %B, ptr %C, i32 %N) !dbg !22 {
entry:
%cmp12 = icmp sgt i32 %N, 0, !dbg !23
br i1 %cmp12, label %ph, label %for.cond.cleanup, !dbg !24
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %ph ]
- %arrayidx = getelementptr inbounds i8, i8* %B, i64 %indvars.iv, !dbg !26
- %0 = load i8, i8* %arrayidx, align 1, !dbg !26, !tbaa !13
- %arrayidx2 = getelementptr inbounds i8, i8* %C, i64 %indvars.iv, !dbg !27
- %1 = load i8, i8* %arrayidx2, align 1, !dbg !27, !tbaa !13
+ %arrayidx = getelementptr inbounds i8, ptr %B, i64 %indvars.iv, !dbg !26
+ %0 = load i8, ptr %arrayidx, align 1, !dbg !26, !tbaa !13
+ %arrayidx2 = getelementptr inbounds i8, ptr %C, i64 %indvars.iv, !dbg !27
+ %1 = load i8, ptr %arrayidx2, align 1, !dbg !27, !tbaa !13
%mul = mul i8 %1, %0, !dbg !28
- %arrayidx6 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv, !dbg !29
- store i8 %mul, i8* %arrayidx6, align 1, !dbg !30, !tbaa !13
+ %arrayidx6 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv, !dbg !29
+ store i8 %mul, ptr %arrayidx6, align 1, !dbg !30, !tbaa !13
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !24
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !24
%exitcond = icmp eq i32 %lftr.wideiv, %N, !dbg !24
; REMARKS: remark: /tmp/t.c:15:3: distributed loop
-define void @success(i8* %A, i8* %B, i8* %C, i8* %D, i8* %E, i32 %N) !dbg !31 {
+define void @success(ptr %A, ptr %B, ptr %C, ptr %D, ptr %E, i32 %N) !dbg !31 {
entry:
%cmp28 = icmp sgt i32 %N, 0, !dbg !32
br i1 %cmp28, label %ph, label %for.cond.cleanup, !dbg !33
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %ph ]
- %arrayidx = getelementptr inbounds i8, i8* %A, i64 %indvars.iv, !dbg !35
- %0 = load i8, i8* %arrayidx, align 1, !dbg !35, !tbaa !13
- %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 %indvars.iv, !dbg !36
- %1 = load i8, i8* %arrayidx2, align 1, !dbg !36, !tbaa !13
+ %arrayidx = getelementptr inbounds i8, ptr %A, i64 %indvars.iv, !dbg !35
+ %0 = load i8, ptr %arrayidx, align 1, !dbg !35, !tbaa !13
+ %arrayidx2 = getelementptr inbounds i8, ptr %B, i64 %indvars.iv, !dbg !36
+ %1 = load i8, ptr %arrayidx2, align 1, !dbg !36, !tbaa !13
%add = add i8 %1, %0, !dbg !37
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !33
- %arrayidx7 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv.next, !dbg !38
- store i8 %add, i8* %arrayidx7, align 1, !dbg !39, !tbaa !13
- %arrayidx9 = getelementptr inbounds i8, i8* %D, i64 %indvars.iv, !dbg !40
- %2 = load i8, i8* %arrayidx9, align 1, !dbg !40, !tbaa !13
- %arrayidx12 = getelementptr inbounds i8, i8* %E, i64 %indvars.iv, !dbg !41
- %3 = load i8, i8* %arrayidx12, align 1, !dbg !41, !tbaa !13
+ %arrayidx7 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv.next, !dbg !38
+ store i8 %add, ptr %arrayidx7, align 1, !dbg !39, !tbaa !13
+ %arrayidx9 = getelementptr inbounds i8, ptr %D, i64 %indvars.iv, !dbg !40
+ %2 = load i8, ptr %arrayidx9, align 1, !dbg !40, !tbaa !13
+ %arrayidx12 = getelementptr inbounds i8, ptr %E, i64 %indvars.iv, !dbg !41
+ %3 = load i8, ptr %arrayidx12, align 1, !dbg !41, !tbaa !13
%mul = mul i8 %3, %2, !dbg !42
- %arrayidx16 = getelementptr inbounds i8, i8* %C, i64 %indvars.iv, !dbg !43
- store i8 %mul, i8* %arrayidx16, align 1, !dbg !44, !tbaa !13
+ %arrayidx16 = getelementptr inbounds i8, ptr %C, i64 %indvars.iv, !dbg !43
+ store i8 %mul, ptr %arrayidx16, align 1, !dbg !44, !tbaa !13
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !33
%exitcond = icmp eq i32 %lftr.wideiv, %N, !dbg !33
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !dbg !33
; MISSED_REMARKS: /tmp/t.c:27:5: loop not distributed: use -Rpass-analysis=loop-distribute for more info
; ANALYSIS_REMARKS: /tmp/t.c:27:5: loop not distributed: may not insert runtime check with convergent operation
; ALWAYS: warning: /tmp/t.c:27:5: loop not distributed: failed explicitly specified loop distribution
-define void @convergent(i8* %A, i8* %B, i8* %C, i8* %D, i8* %E, i32 %N) #1 !dbg !45 {
+define void @convergent(ptr %A, ptr %B, ptr %C, ptr %D, ptr %E, i32 %N) #1 !dbg !45 {
entry:
%cmp28 = icmp sgt i32 %N, 0, !dbg !46
br i1 %cmp28, label %ph, label %for.cond.cleanup, !dbg !47
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %ph ]
- %arrayidx = getelementptr inbounds i8, i8* %A, i64 %indvars.iv, !dbg !49
- %0 = load i8, i8* %arrayidx, align 1, !dbg !49, !tbaa !13
- %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 %indvars.iv, !dbg !50
- %1 = load i8, i8* %arrayidx2, align 1, !dbg !50, !tbaa !13
+ %arrayidx = getelementptr inbounds i8, ptr %A, i64 %indvars.iv, !dbg !49
+ %0 = load i8, ptr %arrayidx, align 1, !dbg !49, !tbaa !13
+ %arrayidx2 = getelementptr inbounds i8, ptr %B, i64 %indvars.iv, !dbg !50
+ %1 = load i8, ptr %arrayidx2, align 1, !dbg !50, !tbaa !13
%add = add i8 %1, %0, !dbg !51
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !57
- %arrayidx7 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv.next, !dbg !52
- store i8 %add, i8* %arrayidx7, align 1, !dbg !53, !tbaa !13
- %arrayidx9 = getelementptr inbounds i8, i8* %D, i64 %indvars.iv, !dbg !54
- %2 = load i8, i8* %arrayidx9, align 1, !dbg !54, !tbaa !13
- %arrayidx12 = getelementptr inbounds i8, i8* %E, i64 %indvars.iv, !dbg !55
- %3 = load i8, i8* %arrayidx12, align 1, !dbg !55, !tbaa !13
+ %arrayidx7 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv.next, !dbg !52
+ store i8 %add, ptr %arrayidx7, align 1, !dbg !53, !tbaa !13
+ %arrayidx9 = getelementptr inbounds i8, ptr %D, i64 %indvars.iv, !dbg !54
+ %2 = load i8, ptr %arrayidx9, align 1, !dbg !54, !tbaa !13
+ %arrayidx12 = getelementptr inbounds i8, ptr %E, i64 %indvars.iv, !dbg !55
+ %3 = load i8, ptr %arrayidx12, align 1, !dbg !55, !tbaa !13
%mul = mul i8 %3, %2, !dbg !56
- %arrayidx16 = getelementptr inbounds i8, i8* %C, i64 %indvars.iv, !dbg !57
- store i8 %mul, i8* %arrayidx16, align 1, !dbg !58, !tbaa !13
+ %arrayidx16 = getelementptr inbounds i8, ptr %C, i64 %indvars.iv, !dbg !57
+ store i8 %mul, ptr %arrayidx16, align 1, !dbg !58, !tbaa !13
call void @llvm.convergent()
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !57
%exitcond = icmp eq i32 %lftr.wideiv, %N, !dbg !57
; CHECK-LABEL: @disable_nonforced(
; CHECK-NOT: for.body.ldist1:
-define void @disable_nonforced(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @disable_nonforced(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body:
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
; CHECK-LABEL: @disable_nonforced(
; CHECK: for.body.ldist1:
-define void @disable_nonforced(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @disable_nonforced(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body:
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
target triple = "x86_64-apple-macosx10.10.0"
; CHECK-LABEL: @explicit_on(
; Loop-distribution test: one loop containing two data-flow chains.
; The '-'/'+' lines below are a migration diff from typed pointers (i32*)
; to opaque pointers (ptr); instruction semantics are unchanged by it.
-define void @explicit_on(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @explicit_on(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
; First chain: a[i+1] = a[i] * b[i] — loop-carried through %a
; (store at %add, load at %ind).
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
; Second chain: c[i] = d[i] * e[i] — all operands are noalias, so this
; chain is independent of the first and can be split into its own loop.
%exitcond = icmp eq i64 %add, 20
; !llvm.loop !0 is defined outside this chunk — presumably it carries
; the explicit llvm.loop.distribute.enable metadata named by the test
; (hence "explicit_on"); TODO confirm against the full file.
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
}
; CHECK-LABEL: @explicit_off(
; Same two-chain loop body as @explicit_on, but the backedge carries
; !llvm.loop !2 instead of !0 — the metadata nodes themselves are defined
; outside this chunk. The '-'/'+' lines are the typed-pointer (i32*) to
; opaque-pointer (ptr) migration diff; semantics are unchanged by it.
-define void @explicit_off(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @explicit_off(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
; Chain 1: a[i+1] = a[i] * b[i] (loop-carried through %a).
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
; Chain 2: c[i] = d[i] * e[i] (independent of chain 1; all args noalias).
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !2
}
; CHECK-LABEL: @default_distribute(
-define void @default_distribute(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @default_distribute(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define void @f(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e,
- i32* noalias %g,
- i32* noalias %h,
- i32* noalias %j,
+define void @f(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e,
+ ptr noalias %g,
+ ptr noalias %h,
+ ptr noalias %j,
i64 %x) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %if.end ]
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
%if.cond = icmp eq i64 %ind, %x
br i1 %if.cond, label %if.then, label %if.end
if.then:
- %arrayidxH = getelementptr inbounds i32, i32* %h, i64 %ind
- %loadH = load i32, i32* %arrayidxH, align 4
+ %arrayidxH = getelementptr inbounds i32, ptr %h, i64 %ind
+ %loadH = load i32, ptr %arrayidxH, align 4
- %arrayidxJ = getelementptr inbounds i32, i32* %j, i64 %ind
- %loadJ = load i32, i32* %arrayidxJ, align 4
+ %arrayidxJ = getelementptr inbounds i32, ptr %j, i64 %ind
+ %loadJ = load i32, ptr %arrayidxJ, align 4
%mulG = mul i32 %loadH, %loadJ
- %arrayidxG = getelementptr inbounds i32, i32* %g, i64 %ind
- store i32 %mulG, i32* %arrayidxG, align 4
+ %arrayidxG = getelementptr inbounds i32, ptr %g, i64 %ind
+ store i32 %mulG, ptr %arrayidxG, align 4
br label %if.end
if.end:
; Testcases inspired by PR50296, PR50288.
-define void @phi_load_store_distribute(i1 %c, i16* %A, i16* %B, i16* %C) {
+define void @phi_load_store_distribute(i1 %c, ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: @phi_load_store_distribute(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[IF_END:%.*]] ]
-; CHECK-NEXT: [[LV:%.*]] = load i16, i16* [[A:%.*]], align 1
-; CHECK-NEXT: store i16 [[LV]], i16* [[A]], align 1
+; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[A:%.*]], align 1
+; CHECK-NEXT: store i16 [[LV]], ptr [[A]], align 1
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_END]]
; CHECK: if.then:
-; CHECK-NEXT: [[LV2:%.*]] = load i16, i16* [[A]], align 1
+; CHECK-NEXT: [[LV2:%.*]] = load i16, ptr [[A]], align 1
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[C_SINK:%.*]] = phi i16* [ [[B:%.*]], [[IF_THEN]] ], [ [[C:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[LV3:%.*]] = load i16, i16* [[C_SINK]], align 2
+; CHECK-NEXT: [[C_SINK:%.*]] = phi ptr [ [[B:%.*]], [[IF_THEN]] ], [ [[C:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[LV3:%.*]] = load i16, ptr [[C_SINK]], align 2
; CHECK-NEXT: [[ADD:%.*]] = add i16 [[LV3]], 10
-; CHECK-NEXT: store i16 [[ADD]], i16* [[C_SINK]], align 1
+; CHECK-NEXT: store i16 [[ADD]], ptr [[C_SINK]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i16 [[IV]], 1
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body: ; preds = %if.end, %entry
%iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
- %lv = load i16, i16* %A, align 1
- store i16 %lv, i16* %A, align 1
+ %lv = load i16, ptr %A, align 1
+ store i16 %lv, ptr %A, align 1
br i1 %c, label %if.then, label %if.end
if.then: ; preds = %for.body
- %lv2 = load i16, i16* %A, align 1
+ %lv2 = load i16, ptr %A, align 1
br label %if.end
if.end: ; preds = %if.then, %for.body
- %c.sink = phi i16* [ %B, %if.then ], [ %C, %for.body ]
- %lv3 = load i16, i16* %c.sink
+ %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+ %lv3 = load i16, ptr %c.sink
%add = add i16 %lv3, 10
- store i16 %add, i16* %c.sink, align 1
+ store i16 %add, ptr %c.sink, align 1
%iv.next = add nuw nsw i16 %iv, 1
%tobool.not = icmp eq i16 %iv.next, 1000
br i1 %tobool.not, label %for.end.loopexit, label %for.body
ret void
}
-define void @phi_load_distribute(i1 %c, i16* %A, i16* %B, i16* %C) {
+define void @phi_load_distribute(i1 %c, ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: @phi_load_distribute(
; CHECK-NEXT: entry:
; CHECK: for.end.loopexit:
for.body: ; preds = %if.end, %entry
%iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
- %lv = load i16, i16* %A, align 1
- store i16 %lv, i16* %A, align 1
+ %lv = load i16, ptr %A, align 1
+ store i16 %lv, ptr %A, align 1
br i1 %c, label %if.then, label %if.end
if.then: ; preds = %for.body
- %lv2 = load i16, i16* %A, align 1
+ %lv2 = load i16, ptr %A, align 1
br label %if.end
if.end: ; preds = %if.then, %for.body
- %c.sink = phi i16* [ %B, %if.then ], [ %C, %for.body ]
- %lv3 = load i16, i16* %c.sink
+ %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+ %lv3 = load i16, ptr %c.sink
%iv.next = add nuw nsw i16 %iv, 1
%tobool.not = icmp eq i16 %iv.next, 1000
br i1 %tobool.not, label %for.end.loopexit, label %for.body
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @fn1(i64 %a, i64* %b) {
+define void @fn1(i64 %a, ptr %b) {
entry:
br label %for.body
for.body:
%add75.epil = phi i64 [ %add7.epil, %for.body ], [ %a, %entry ]
%add1.epil = add nsw i64 %add75.epil, 268435457
- %arrayidx.epil = getelementptr inbounds i64, i64* %b, i64 %add1.epil
- %load = load i64, i64* %arrayidx.epil, align 8
+ %arrayidx.epil = getelementptr inbounds i64, ptr %b, i64 %add1.epil
+ %load = load i64, ptr %arrayidx.epil, align 8
%add5.epil = add nsw i64 %add75.epil, 805306369
- %arrayidx6.epil = getelementptr inbounds i64, i64* %b, i64 %add5.epil
- store i64 %load, i64* %arrayidx6.epil, align 8
+ %arrayidx6.epil = getelementptr inbounds i64, ptr %b, i64 %add5.epil
+ store i64 %load, ptr %arrayidx6.epil, align 8
%add7.epil = add nsw i64 %add75.epil, 2
%epil.iter.cmp = icmp eq i64 %add7.epil, 0
br i1 %epil.iter.cmp, label %for.end, label %for.body
; CHECK: %[[phi:.*]] = phi i64
; CHECK: %[[add1:.*]] = add nsw i64 %[[phi]], 268435457
- ; CHECK: %[[gep1:.*]] = getelementptr inbounds i64, i64* %b, i64 %[[add1]]
- ; CHECK: %[[load:.*]] = load i64, i64* %[[gep1]], align 8
+ ; CHECK: %[[gep1:.*]] = getelementptr inbounds i64, ptr %b, i64 %[[add1]]
+ ; CHECK: %[[load:.*]] = load i64, ptr %[[gep1]], align 8
; CHECK: %[[add2:.*]] = add nsw i64 %[[phi]], 805306369
- ; CHECK: %[[gep2:.*]] = getelementptr inbounds i64, i64* %b, i64 %[[add2]]
- ; CHECK: store i64 %[[load]], i64* %[[gep2]], align 8
+ ; CHECK: %[[gep2:.*]] = getelementptr inbounds i64, ptr %b, i64 %[[add2]]
+ ; CHECK: store i64 %[[load]], ptr %[[gep2]], align 8
; CHECK: %[[incr:.*]] = add nsw i64 %[[phi]], 2
; CHECK: %[[cmp:.*]] = icmp eq i64 %[[incr]], 0
; CHECK: br i1 %[[cmp]]
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define void @f(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i32* noalias %e) {
+define void @f(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c,
+ ptr noalias %d,
+ ptr noalias %e) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- store i32 %mulC, i32* %arrayidxC, align 4
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define void @f(i32* noalias %a,
+define void @f(ptr noalias %a,
;
;
; DEFAULT-LABEL: @f(
; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; DEFAULT: for.body.lver.orig:
; DEFAULT-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
-; DEFAULT-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; DEFAULT-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
-; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[MULA_LVER_ORIG]], ptr [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXD_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[IND_LVER_ORIG]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOADSTRIDEDA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADSTRIDEDA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADSTRIDEDA_LVER_ORIG]]
-; DEFAULT-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IND_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; DEFAULT: for.body.ph.ldist1:
; DEFAULT-NEXT: br label [[FOR_BODY_LDIST1:%.*]]
; DEFAULT: for.body.ldist1:
; DEFAULT-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
-; DEFAULT-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IND_LDIST1]]
-; DEFAULT-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4
-; DEFAULT-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IND_LDIST1]]
-; DEFAULT-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IND_LDIST1]]
+; DEFAULT-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IND_LDIST1]]
+; DEFAULT-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
; DEFAULT-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; DEFAULT-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
-; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
-; DEFAULT-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
+; DEFAULT-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
; DEFAULT-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; DEFAULT: for.body.ph:
; DEFAULT: for.body:
; DEFAULT-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
-; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[IND]]
-; DEFAULT-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[IND]]
+; DEFAULT-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
-; DEFAULT-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA]], align 4
; DEFAULT-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
-; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[IND]]
-; DEFAULT-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IND]]
+; DEFAULT-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end.loopexit:
; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
; NO-VERSION: for.body:
; NO-VERSION-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; NO-VERSION-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IND]]
-; NO-VERSION-NEXT: [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
-; NO-VERSION-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IND]]
-; NO-VERSION-NEXT: [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADA:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADB:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4
; NO-VERSION-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; NO-VERSION-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
-; NO-VERSION-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
-; NO-VERSION-NEXT: store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
-; NO-VERSION-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[IND]]
-; NO-VERSION-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD]]
+; NO-VERSION-NEXT: store i32 [[MULA]], ptr [[ARRAYIDXA_PLUS_4]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE:%.*]]
-; NO-VERSION-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
-; NO-VERSION-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, ptr [[ARRAYIDXSTRIDEDA]], align 4
; NO-VERSION-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
-; NO-VERSION-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IND]]
-; NO-VERSION-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; NO-VERSION: for.end:
; NO-VERSION-NEXT: ret void
;
i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
+ ptr noalias %c,
+ ptr noalias %d,
i64 %stride) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
%mul = mul i64 %ind, %stride
- %arrayidxStridedA = getelementptr inbounds i32, i32* %a, i64 %mul
- %loadStridedA = load i32, i32* %arrayidxStridedA, align 4
+ %arrayidxStridedA = getelementptr inbounds i32, ptr %a, i64 %mul
+ %loadStridedA = load i32, ptr %arrayidxStridedA, align 4
%mulC = mul i32 %loadD, %loadStridedA
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
; TODO
; Can distribute with unknown backedge-taken count, because no runtime checks are
; required.
-define void @unknown_btc_distribute_no_checks_needed(i32* noalias %a,
- i32* noalias %c,
- i32* noalias %d) {
+define void @unknown_btc_distribute_no_checks_needed(ptr noalias %a,
+ ptr noalias %c,
+ ptr noalias %d) {
; CHECK-LABEL: @unknown_btc_distribute_no_checks_needed(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i32 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i32 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
%mulA = mul i32 %loadA, 10
%add = add nuw nsw i32 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i32 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i32 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i32 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i32 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
%mulC = mul i32 %loadD, 20
- %arrayidxC = getelementptr inbounds i32, i32* %c, i32 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i32 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
br i1 false, label %for.end, label %for.body
; Cannot distribute with unknown backedge-taken count, because runtime checks for
; induction wrapping are required.
-define void @unknown_btc_do_not_distribute_wrapping_checks(i32* noalias %a,
- i32* noalias %c,
- i32* noalias %d) {
+define void @unknown_btc_do_not_distribute_wrapping_checks(ptr noalias %a,
+ ptr noalias %c,
+ ptr noalias %d) {
; CHECK-LABEL: @unknown_btc_do_not_distribute_wrapping_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i32 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i32 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
%mulA = mul i32 %loadA, 10
%add = add i32 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i32 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i32 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i32 %ind
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i32 %ind
+ %loadD = load i32, ptr %arrayidxD, align 4
%mulC = mul i32 %loadD, 20
- %arrayidxC = getelementptr inbounds i32, i32* %c, i32 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i32 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
br i1 false, label %for.end, label %for.body
; Verify that we didn't distribute by checking that we still have the original
; number of branches.
-@A = common global i32* null, align 8
-@B = common global i32* null, align 8
-@C = common global i32* null, align 8
+@A = common global ptr null, align 8
+@B = common global ptr null, align 8
+@C = common global ptr null, align 8
define void @f() {
entry:
- %a = load i32*, i32** @A, align 8
- %b = load i32*, i32** @B, align 8
- %c = load i32*, i32** @C, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
+ %c = load ptr, ptr @C, align 8
br label %for.body
; CHECK: br
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
%mulA = mul i32 %loadA, 3
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulC = mul i32 %loadB, 2
%ind_2 = mul i64 %ind, %ind
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind_2
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind_2
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-define dso_local void @inner_limit_not_invariant(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i16 %val) {
+define dso_local void @inner_limit_not_invariant(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i16 %val) {
; CHECK-LABEL: @inner_limit_not_invariant(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP26_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add i32 [[TMP0]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[ADD_US]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* [[ARRAYIDX_US]], align 2
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX_US]], align 2
; CHECK-NEXT: [[CONV_US:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT: [[MUL5_US:%.*]] = mul nsw i32 [[CONV_US]], [[CONV4]]
-; CHECK-NEXT: [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: store i32 [[MUL5_US]], i32* [[ARRAYIDX9_US]], align 4
+; CHECK-NEXT: [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: store i32 [[MUL5_US]], ptr [[ARRAYIDX9_US]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY3_US]], label [[FOR_COND1_FOR_INC10_CRIT_EDGE_US]]
%0 = trunc i64 %indvars.iv to i32
%add.us = add i32 %0, %mul.us
%idxprom.us = zext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i16, i16* %A, i64 %idxprom.us
- %1 = load i16, i16* %arrayidx.us, align 2
+ %arrayidx.us = getelementptr inbounds i16, ptr %A, i64 %idxprom.us
+ %1 = load i16, ptr %arrayidx.us, align 2
%conv.us = sext i16 %1 to i32
%mul5.us = mul nsw i32 %conv.us, %conv4
- %arrayidx9.us = getelementptr inbounds i32, i32* %C, i64 %idxprom.us
- store i32 %mul5.us, i32* %arrayidx9.us, align 4
+ %arrayidx9.us = getelementptr inbounds i32, ptr %C, i64 %idxprom.us
+ store i32 %mul5.us, ptr %arrayidx9.us, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.body3.us, label %for.cond1.for.inc10_crit_edge.us
ret void
}
-define dso_local void @outer_limit_not_invariant(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i16 %val, i64 %M) {
+define dso_local void @outer_limit_not_invariant(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i16 %val, i64 %M) {
; CHECK-LABEL: @outer_limit_not_invariant(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP26_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add i32 [[TMP0]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[ADD_US]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* [[ARRAYIDX_US]], align 2
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX_US]], align 2
; CHECK-NEXT: [[CONV_US:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT: [[MUL5_US:%.*]] = mul nsw i32 [[CONV_US]], [[CONV4]]
-; CHECK-NEXT: [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: store i32 [[MUL5_US]], i32* [[ARRAYIDX9_US]], align 4
+; CHECK-NEXT: [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: store i32 [[MUL5_US]], ptr [[ARRAYIDX9_US]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[M]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY3_US]], label [[FOR_COND1_FOR_INC10_CRIT_EDGE_US]]
%0 = trunc i64 %indvars.iv to i32
%add.us = add i32 %0, %mul.us
%idxprom.us = zext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i16, i16* %A, i64 %idxprom.us
- %1 = load i16, i16* %arrayidx.us, align 2
+ %arrayidx.us = getelementptr inbounds i16, ptr %A, i64 %idxprom.us
+ %1 = load i16, ptr %arrayidx.us, align 2
%conv.us = sext i16 %1 to i32
%mul5.us = mul nsw i32 %conv.us, %conv4
- %arrayidx9.us = getelementptr inbounds i32, i32* %C, i64 %idxprom.us
- store i32 %mul5.us, i32* %arrayidx9.us, align 4
+ %arrayidx9.us = getelementptr inbounds i32, ptr %C, i64 %idxprom.us
+ store i32 %mul5.us, ptr %arrayidx9.us, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %M
br i1 %exitcond, label %for.body3.us, label %for.cond1.for.inc10_crit_edge.us
; CHECK-NOT: Checks all passed, doing the transformation
; Outer loop does not start at zero
-define void @test_1(i32 %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define void @test_1(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%cmp25 = icmp sgt i32 %N, 0
br i1 %cmp25, label %for.body4.lr.ph, label %for.cond.cleanup
for.body4:
%j.024 = phi i32 [ 0, %for.body4.lr.ph ], [ %inc, %for.body4 ]
%add = add nsw i32 %j.024, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ %0 = load i32, ptr %arrayidx, align 4
%mul5 = mul nsw i32 %0, %scale
- %arrayidx8 = getelementptr inbounds i32, i32* %C, i32 %add
- store i32 %mul5, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %C, i32 %add
+ store i32 %mul5, ptr %arrayidx8, align 4
%inc = add nuw nsw i32 %j.024, 1
%exitcond = icmp eq i32 %inc, %N
br i1 %exitcond, label %for.cond.cleanup3, label %for.body4
}
; Inner loop does not start at zero
-define void @test_2(i32 %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define void @test_2(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%cmp25 = icmp sgt i32 %N, 0
br i1 %cmp25, label %for.body4.lr.ph, label %for.cond.cleanup
for.body4:
%j.024 = phi i32 [ 1, %for.body4.lr.ph ], [ %inc, %for.body4 ]
%add = add nsw i32 %j.024, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ %0 = load i32, ptr %arrayidx, align 4
%mul5 = mul nsw i32 %0, %scale
- %arrayidx8 = getelementptr inbounds i32, i32* %C, i32 %add
- store i32 %mul5, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %C, i32 %add
+ store i32 %mul5, ptr %arrayidx8, align 4
%inc = add nuw nsw i32 %j.024, 1
%exitcond = icmp eq i32 %inc, %N
br i1 %exitcond, label %for.cond.cleanup3, label %for.body4
}
; Outer IV used directly
-define hidden void @test_3(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define hidden void @test_3(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp25 = icmp eq i16 %N, 0
for.body.us: ; preds = %for.cond2.for.cond.cleanup6_crit_edge.us, %for.body.lr.ph.split.us
%i.026.us = phi i32 [ 0, %for.body.lr.ph.split.us ], [ %inc12.us, %for.cond2.for.cond.cleanup6_crit_edge.us ]
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i32 %i.026.us
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i32 %i.026.us
%mul9.us = mul nuw nsw i32 %i.026.us, %conv
br label %for.body7.us
for.body7.us: ; preds = %for.body.us, %for.body7.us
%j.024.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body7.us ]
- %0 = load i32, i32* %arrayidx.us, align 4
+ %0 = load i32, ptr %arrayidx.us, align 4
%mul.us = mul nsw i32 %0, %scale
%add.us = add nuw nsw i32 %j.024.us, %mul9.us
- %arrayidx10.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- store i32 %mul.us, i32* %arrayidx10.us, align 4
+ %arrayidx10.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ store i32 %mul.us, ptr %arrayidx10.us, align 4
%inc.us = add nuw nsw i32 %j.024.us, 1
%exitcond = icmp ne i32 %inc.us, %conv
br i1 %exitcond, label %for.body7.us, label %for.cond2.for.cond.cleanup6_crit_edge.us
}
; Inner IV used directly
-define hidden void @test_4(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define hidden void @test_4(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp25 = icmp eq i16 %N, 0
for.body7.us: ; preds = %for.body.us, %for.body7.us
%j.024.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body7.us ]
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i32 %j.024.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i32 %j.024.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%mul.us = mul nsw i32 %0, %scale
%add.us = add nuw nsw i32 %j.024.us, %mul9.us
- %arrayidx10.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- store i32 %mul.us, i32* %arrayidx10.us, align 4
+ %arrayidx10.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ store i32 %mul.us, ptr %arrayidx10.us, align 4
%inc.us = add nuw nsw i32 %j.024.us, 1
%exitcond = icmp ne i32 %inc.us, %conv
br i1 %exitcond, label %for.body7.us, label %for.cond2.for.cond.cleanup6_crit_edge.us
; Inner iteration count not invariant in outer loop
declare i32 @get_int() readonly
-define void @test_5(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define void @test_5(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp27 = icmp eq i16 %N, 0
for.body6: ; preds = %for.body6.lr.ph, %for.body6
%j.026 = phi i32 [ 0, %for.body6.lr.ph ], [ %inc, %for.body6 ]
%add = add nsw i32 %j.026, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ %0 = load i32, ptr %arrayidx, align 4
%mul7 = mul nsw i32 %0, %scale
- %arrayidx10 = getelementptr inbounds i32, i32* %C, i32 %add
- store i32 %mul7, i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %C, i32 %add
+ store i32 %mul7, ptr %arrayidx10, align 4
%inc = add nuw nsw i32 %j.026, 1
%exitcond = icmp ne i32 %inc, %call
br i1 %exitcond, label %for.body6, label %for.cond.cleanup5.loopexit
}
; Inner loop has an early exit
-define hidden void @test_6(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define hidden void @test_6(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp39 = icmp eq i16 %N, 0
for.body7.us: ; preds = %for.body.us, %if.end.us
%j.038.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %if.end.us ]
%add.us = add nuw nsw i32 %j.038.us, %mul.us
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i32 %add.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i32 %add.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%tobool.us = icmp eq i32 %0, 0
br i1 %tobool.us, label %if.end.us, label %cleanup.us
br i1 %exitcond, label %for.cond.cleanup, label %for.body.us
if.end.us: ; preds = %for.body7.us
- %arrayidx17.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- store i32 0, i32* %arrayidx17.us, align 4
+ %arrayidx17.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ store i32 0, ptr %arrayidx17.us, align 4
%inc.us = add nuw nsw i32 %j.038.us, 1
%cmp4.us = icmp ult i32 %inc.us, %conv
br i1 %cmp4.us, label %for.body7.us, label %cleanup.us
ret void
}
-define hidden void @test_7(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define hidden void @test_7(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp30 = icmp eq i16 %N, 0
for.body7.us: ; preds = %for.body7.us, %for.body7.lr.ph.us
%j.029.us = phi i32 [ 0, %for.body7.lr.ph.us ], [ %inc.us, %for.body7.us ]
%add.us = add nuw nsw i32 %j.029.us, %mul.us
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i32 %add.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i32 %add.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%mul9.us = mul nsw i32 %0, %scale
- %arrayidx13.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- store i32 %mul9.us, i32* %arrayidx13.us, align 4
+ %arrayidx13.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ store i32 %mul9.us, ptr %arrayidx13.us, align 4
%inc.us = add nuw nsw i32 %j.029.us, 1
%exitcond = icmp eq i32 %inc.us, %conv
br i1 %exitcond, label %for.cond2.for.cond.cleanup6_crit_edge.us, label %for.body7.us
}
; Step is not 1
-define i32 @test_8(i32 %val, i16* nocapture %A) {
+define i32 @test_8(i32 %val, ptr nocapture %A) {
entry:
br label %for.body
for.body3: ; preds = %for.body, %for.body3
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.body3 ]
%add = add nuw nsw i32 %j.017, %mul
- %arrayidx = getelementptr inbounds i16, i16* %A, i32 %add
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add
+ %0 = load i16, ptr %arrayidx, align 2
%conv16 = zext i16 %0 to i32
%add4 = add i32 %conv16, %val
%conv5 = trunc i32 %add4 to i16
- store i16 %conv5, i16* %arrayidx, align 2
+ store i16 %conv5, ptr %arrayidx, align 2
%inc = add nuw nsw i32 %j.017, 1
%exitcond = icmp ne i32 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc6
; Step is not 1
-define i32 @test_9(i32 %val, i16* nocapture %A) {
+define i32 @test_9(i32 %val, ptr nocapture %A) {
entry:
br label %for.body
for.body3: ; preds = %for.body, %for.body3
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.body3 ]
%add = add nuw nsw i32 %j.017, %mul
- %arrayidx = getelementptr inbounds i16, i16* %A, i32 %add
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add
+ %0 = load i16, ptr %arrayidx, align 2
%conv16 = zext i16 %0 to i32
%add4 = add i32 %conv16, %val
%conv5 = trunc i32 %add4 to i16
- store i16 %conv5, i16* %arrayidx, align 2
+ store i16 %conv5, ptr %arrayidx, align 2
%inc = add nuw nsw i32 %j.017, 2
%exitcond = icmp ne i32 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc6
; icmp ult i32 %j, tripcount-step.
; test_10: The step is not 1.
-define i32 @test_10(i32* nocapture %A) {
+define i32 @test_10(ptr nocapture %A) {
entry:
br label %for.cond1.preheader
for.body4:
%j.016 = phi i32 [ 0, %for.cond1.preheader ], [ %add5, %for.body4 ]
%add = add i32 %j.016, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 30, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 30, ptr %arrayidx, align 4
%add5 = add nuw nsw i32 %j.016, 2
%cmp2 = icmp ult i32 %j.016, 18
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
br i1 %cmp, label %for.cond1.preheader, label %for.cond.cleanup
for.cond.cleanup:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
ret i32 %0
}
; test_11: The inner induction variable is used in a compare which
; isn't the condition of the inner branch.
-define i32 @test_11(i32* nocapture %A) {
+define i32 @test_11(ptr nocapture %A) {
entry:
br label %for.cond1.preheader
%cmp5 = icmp ult i32 %j.019, 5
%cond = select i1 %cmp5, i32 30, i32 15
%add = add i32 %j.019, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 %cond, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 %cond, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %j.019, 1
%cmp2 = icmp ult i32 %j.019, 19
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
br i1 %cmp, label %for.cond1.preheader, label %for.cond.cleanup
for.cond.cleanup:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
ret i32 %0
}
; test_12: Incoming phi node value for preheader is a variable
-define i32 @test_12(i32* %A) {
+define i32 @test_12(ptr %A) {
entry:
br label %while.cond1.preheader
while.body3:
%j.115 = phi i32 [ %inc, %while.body3 ], [ %j.017, %while.body3.preheader ]
%add = add i32 %j.115, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 30, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 30, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %j.115, 1
%cmp2 = icmp ult i32 %j.115, 19
br i1 %cmp2, label %while.body3, label %while.end.loopexit
br i1 %cmp, label %while.cond1.preheader, label %while.end5
while.end5:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
ret i32 %0
}
%mul9.us.us = mul nsw i32 %add.us.us, %N
%add10.us.us = add nsw i32 %mul9.us.us, %i.036.us
%idxprom.us.us = sext i32 %add10.us.us to i64
- %arrayidx.us.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us.us
- tail call void @f(i32* %arrayidx.us.us) #2
+ %arrayidx.us.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us.us
+ tail call void @f(ptr %arrayidx.us.us) #2
%inc.us.us = add nuw nsw i32 %k.031.us.us, 1
%cmp6.us.us = icmp slt i32 %inc.us.us, %N
br i1 %cmp6.us.us, label %for.body8.us.us, label %for.cond5.for.cond.cleanup7_crit_edge.us.us
%j.026.us.us = phi i32 [ 0, %for.cond5.preheader.us.us ], [ %inc.us.us, %for.body8.us.us ]
%add.us.us = add nsw i32 %j.026.us.us, %mul.us.us
%idxprom.us.us = sext i32 %add.us.us to i64
- %arrayidx.us.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us.us
- tail call void @f(i32* %arrayidx.us.us) #2
+ %arrayidx.us.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us.us
+ tail call void @f(ptr %arrayidx.us.us) #2
%inc.us.us = add nuw nsw i32 %j.026.us.us, 1
%exitcond = icmp ne i32 %inc.us.us, %M
br i1 %exitcond, label %for.body8.us.us, label %for.cond5.for.cond.cleanup7_crit_edge.us.us
%j.028.us.us = phi i32 [ %inc10.us.us, %for.cond5.for.cond.cleanup7_crit_edge.us.us ], [ 0, %for.body4.us.us.preheader ]
%add.us.us = add nsw i32 %j.028.us.us, %mul.us
%idxprom.us.us = sext i32 %add.us.us to i64
- %arrayidx.us.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us.us
- store i32 0, i32* %arrayidx.us.us, align 4
+ %arrayidx.us.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us.us
+ store i32 0, ptr %arrayidx.us.us, align 4
br label %for.body8.us.us
for.cond5.for.cond.cleanup7_crit_edge.us.us:
for.body8.us.us:
%k.026.us.us = phi i32 [ 0, %for.body4.us.us ], [ %inc.us.us, %for.body8.us.us ]
- tail call void bitcast (void (...)* @g to void ()*)() #2
+ tail call void @g() #2
%inc.us.us = add nuw nsw i32 %k.026.us.us, 1
%exitcond = icmp ne i32 %inc.us.us, %N
br i1 %exitcond, label %for.body8.us.us, label %for.cond5.for.cond.cleanup7_crit_edge.us.us
; Backedge-taken count is not predictable.
%struct.Limits = type { i16, i16 }
-define void @backedge_count(%struct.Limits* %lim) {
+define void @backedge_count(ptr %lim) {
entry:
- %N = getelementptr inbounds %struct.Limits, %struct.Limits* %lim, i32 0, i32 0
- %M = getelementptr inbounds %struct.Limits, %struct.Limits* %lim, i32 0, i32 1
- %0 = load i16, i16* %N, align 2
+ %M = getelementptr inbounds %struct.Limits, ptr %lim, i32 0, i32 1
+ %0 = load i16, ptr %lim, align 2
%cmp20 = icmp sgt i16 %0, 0
br i1 %cmp20, label %for.cond2.preheader.preheader, label %for.cond.cleanup
for.cond2.preheader.preheader:
- %.pre = load i16, i16* %M, align 2
+ %.pre = load i16, ptr %M, align 2
br label %for.cond2.preheader
for.cond2.preheader:
ret void
for.cond.cleanup6.loopexit:
- %.pre22 = load i16, i16* %N, align 2
+ %.pre22 = load i16, ptr %lim, align 2
br label %for.cond.cleanup6
for.cond.cleanup6:
for.body7:
%j.018 = phi i32 [ %inc, %for.body7 ], [ 0, %for.cond2.preheader ]
- tail call void bitcast (void (...)* @g to void ()*)()
+ tail call void @g()
%inc = add nuw nsw i32 %j.018, 1
- %5 = load i16, i16* %M, align 2
+ %5 = load i16, ptr %M, align 2
%conv3 = sext i16 %5 to i32
%cmp4 = icmp slt i32 %inc, %conv3
br i1 %cmp4, label %for.body7, label %for.cond.cleanup6.loopexit
}
; Invalid trip count
-define void @invalid_tripCount(i8* %a, i32 %b, i32 %c, i32 %initial-mutations, i32 %statemutations) {
+define void @invalid_tripCount(ptr %a, i32 %b, i32 %c, i32 %initial-mutations, i32 %statemutations) {
entry:
%iszero = icmp eq i32 %b, 0
br i1 %iszero, label %for.empty, label %for.loopinit
%0 = icmp eq i32 %statemutations, %initial-mutations
br i1 %0, label %for.notmutated, label %for.mutated
for.mutated:
- call void @objc_enumerationMutation(i8* %a)
+ call void @objc_enumerationMutation(ptr %a)
br label %for.notmutated
for.notmutated:
%1 = add nuw i32 %for.index, 1
; GEP doesn't dominate the loop latch so can't guarantee N*M won't overflow.
@first = global i32 1, align 4
@a = external global [0 x i8], align 1
-define void @overflow(i32 %lim, i8* %a) {
+define void @overflow(i32 %lim, ptr %a) {
entry:
%cmp17.not = icmp eq i32 %lim, 0
br i1 %cmp17.not, label %for.cond.cleanup, label %for.cond1.preheader.preheader
for.body4:
%j.016 = phi i32 [ 0, %for.cond1.preheader ], [ %inc, %if.end ]
%add = add i32 %j.016, %mul
- %0 = load i32, i32* @first, align 4
+ %0 = load i32, ptr @first, align 4
%tobool.not = icmp eq i32 %0, 0
br i1 %tobool.not, label %if.end, label %if.then
if.then:
- %arrayidx = getelementptr inbounds [0 x i8], [0 x i8]* @a, i32 0, i32 %add
- %1 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds [0 x i8], ptr @a, i32 0, i32 %add
+ %1 = load i8, ptr %arrayidx, align 1
tail call void asm sideeffect "", "r"(i8 %1)
- store i32 0, i32* @first, align 4
+ store i32 0, ptr @first, align 4
br label %if.end
if.end:
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
}
-declare void @objc_enumerationMutation(i8*)
-declare dso_local void @f(i32*)
+declare void @objc_enumerationMutation(ptr)
+declare dso_local void @f(ptr)
declare dso_local void @g(...)
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-define void @test(i64 %N, i64* %A, i64 %val) {
+define void @test(i64 %N, ptr %A, i64 %val) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INC6:%.*]], [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_COND_PREHEADER_PREHEADER]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[I]], [[N]]
; CHECK-NEXT: [[ADD:%.*]] = add i64 0, [[MUL]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[I]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[I]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD4:%.*]] = add nsw i64 [[TMP0]], [[VAL:%.*]]
-; CHECK-NEXT: store i64 [[ADD4]], i64* [[ARRAYIDX]], align 4
+; CHECK-NEXT: store i64 [[ADD4]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i64 0, 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[INC]], [[N]]
; CHECK-NEXT: [[INC6]] = add nuw nsw i64 [[I]], 1
for.body:
%j = phi i64 [ 0, %for.cond.preheader ], [ %inc, %for.body ]
%add = add i64 %j, %mul
- %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
- %0 = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i64, ptr %A, i64 %add
+ %0 = load i64, ptr %arrayidx, align 4
%add4 = add nsw i64 %0, %val
- store i64 %add4, i64* %arrayidx, align 4
+ store i64 %add4, ptr %arrayidx, align 4
%inc = add nuw nsw i64 %j, 1
%cmp2 = icmp ult i64 %inc, %N
br i1 %cmp2, label %for.body, label %for.cond.for.inc_crit_edge
; CHECK-LABEL: test1
; Simple loop where the IV's is constant
-define i32 @test1(i32 %val, i16* nocapture %A) {
+define i32 @test1(i32 %val, ptr nocapture %A) {
entry:
br label %for.body
; CHECK: entry:
for.body3: ; preds = %for.body, %for.body3
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.body3 ]
%add = add nuw nsw i32 %j.017, %mul
- %arrayidx = getelementptr inbounds i16, i16* %A, i32 %add
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add
+ %0 = load i16, ptr %arrayidx, align 2
%conv16 = zext i16 %0 to i32
%add4 = add i32 %conv16, %val
%conv5 = trunc i32 %add4 to i16
- store i16 %conv5, i16* %arrayidx, align 2
+ store i16 %conv5, ptr %arrayidx, align 2
%inc = add nuw nsw i32 %j.017, 1
%exitcond = icmp ne i32 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc6
; CHECK: for.body3:
; CHECK: %j.017 = phi i32 [ 0, %for.body ]
; CHECK: %add = add nuw nsw i32 %j.017, %mul
-; CHECK: %arrayidx = getelementptr inbounds i16, i16* %A, i32 %i.018
-; CHECK: %0 = load i16, i16* %arrayidx, align 2
+; CHECK: %arrayidx = getelementptr inbounds i16, ptr %A, i32 %i.018
+; CHECK: %0 = load i16, ptr %arrayidx, align 2
; CHECK: %conv16 = zext i16 %0 to i32
; CHECK: %add4 = add i32 %conv16, %val
; CHECK: %conv5 = trunc i32 %add4 to i16
-; CHECK: store i16 %conv5, i16* %arrayidx, align 2
+; CHECK: store i16 %conv5, ptr %arrayidx, align 2
; CHECK: %inc = add nuw nsw i32 %j.017, 1
; CHECK: %exitcond = icmp ne i32 %inc, 20
; CHECK: br label %for.inc6
; CHECK-LABEL: test2
; Same as above but non constant IV (which still cannot overflow)
-define i32 @test2(i8 zeroext %I, i32 %val, i16* nocapture %A) {
+define i32 @test2(i8 zeroext %I, i32 %val, ptr nocapture %A) {
entry:
%conv = zext i8 %I to i32
%cmp26 = icmp eq i8 %I, 0
for.body6.us: ; preds = %for.body.us, %for.body6.us
%j.025.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body6.us ]
%add.us = add nuw nsw i32 %j.025.us, %mul.us
- %arrayidx.us = getelementptr inbounds i16, i16* %A, i32 %add.us
- %0 = load i16, i16* %arrayidx.us, align 2
+ %arrayidx.us = getelementptr inbounds i16, ptr %A, i32 %add.us
+ %0 = load i16, ptr %arrayidx.us, align 2
%conv823.us = zext i16 %0 to i32
%add9.us = add i32 %conv823.us, %val
%conv10.us = trunc i32 %add9.us to i16
- store i16 %conv10.us, i16* %arrayidx.us, align 2
+ store i16 %conv10.us, ptr %arrayidx.us, align 2
%inc.us = add nuw nsw i32 %j.025.us, 1
%exitcond = icmp ne i32 %inc.us, %conv
br i1 %exitcond, label %for.body6.us, label %for.cond2.for.inc11_crit_edge.us
; CHECK: for.body6.us:
; CHECK: %j.025.us = phi i32 [ 0, %for.body.us ]
; CHECK: %add.us = add nuw nsw i32 %j.025.us, %mul.us
-; CHECK: %arrayidx.us = getelementptr inbounds i16, i16* %A, i32 %i.027.us
-; CHECK: %0 = load i16, i16* %arrayidx.us, align 2
+; CHECK: %arrayidx.us = getelementptr inbounds i16, ptr %A, i32 %i.027.us
+; CHECK: %0 = load i16, ptr %arrayidx.us, align 2
; CHECK: %conv823.us = zext i16 %0 to i32
; CHECK: %add9.us = add i32 %conv823.us, %val
; CHECK: %conv10.us = trunc i32 %add9.us to i16
-; CHECK: store i16 %conv10.us, i16* %arrayidx.us, align 2
+; CHECK: store i16 %conv10.us, ptr %arrayidx.us, align 2
; CHECK: %inc.us = add nuw nsw i32 %j.025.us, 1
; CHECK: %exitcond = icmp ne i32 %inc.us, %conv
; CHECK: br label %for.cond2.for.inc11_crit_edge.us
; CHECK-LABEL: test3
; Same as above, uses load to determine it can't overflow
-define i32 @test3(i32 %N, i32 %val, i16* nocapture %A) local_unnamed_addr #0 {
+define i32 @test3(i32 %N, i32 %val, ptr nocapture %A) local_unnamed_addr #0 {
entry:
%cmp21 = icmp eq i32 %N, 0
br i1 %cmp21, label %for.end8, label %for.body.lr.ph.split.us
for.body3.us: ; preds = %for.body.us, %for.body3.us
%j.020.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body3.us ]
%add.us = add i32 %j.020.us, %mul.us
- %arrayidx.us = getelementptr inbounds i16, i16* %A, i32 %add.us
- %0 = load i16, i16* %arrayidx.us, align 2
+ %arrayidx.us = getelementptr inbounds i16, ptr %A, i32 %add.us
+ %0 = load i16, ptr %arrayidx.us, align 2
%conv18.us = zext i16 %0 to i32
%add4.us = add i32 %conv18.us, %val
%conv5.us = trunc i32 %add4.us to i16
- store i16 %conv5.us, i16* %arrayidx.us, align 2
+ store i16 %conv5.us, ptr %arrayidx.us, align 2
%inc.us = add nuw i32 %j.020.us, 1
%exitcond = icmp ne i32 %inc.us, %N
br i1 %exitcond, label %for.body3.us, label %for.cond1.for.inc6_crit_edge.us
; CHECK: for.body3.us:
; CHECK: %j.020.us = phi i32 [ 0, %for.body.us ]
; CHECK: %add.us = add i32 %j.020.us, %mul.us
-; CHECK: %arrayidx.us = getelementptr inbounds i16, i16* %A, i32 %i.022.us
-; CHECK: %0 = load i16, i16* %arrayidx.us, align 2
+; CHECK: %arrayidx.us = getelementptr inbounds i16, ptr %A, i32 %i.022.us
+; CHECK: %0 = load i16, ptr %arrayidx.us, align 2
; CHECK: %conv18.us = zext i16 %0 to i32
; CHECK: %add4.us = add i32 %conv18.us, %val
; CHECK: %conv5.us = trunc i32 %add4.us to i16
-; CHECK: store i16 %conv5.us, i16* %arrayidx.us, align 2
+; CHECK: store i16 %conv5.us, ptr %arrayidx.us, align 2
; CHECK: %inc.us = add nuw i32 %j.020.us, 1
; CHECK: %exitcond = icmp ne i32 %inc.us, %N
; CHECK: br label %for.cond1.for.inc6_crit_edge.us
; CHECK-LABEL: test4
; Multiplication cannot overflow, so we can replace the original loop.
-define void @test4(i16 zeroext %N, i32* nocapture %C, i32* nocapture readonly %A, i32 %scale) {
+define void @test4(i16 zeroext %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %scale) {
entry:
%conv = zext i16 %N to i32
%cmp30 = icmp eq i16 %N, 0
; CHECK: for.body7.us:
%j.029.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body7.us ]
%add.us = add nuw nsw i32 %j.029.us, %mul.us
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i32 %add.us
-; CHECK: getelementptr inbounds i32, i32* %A, i32 %[[OUTER_IV]]
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i32 %add.us
+; CHECK: getelementptr inbounds i32, ptr %A, i32 %[[OUTER_IV]]
+ %0 = load i32, ptr %arrayidx.us, align 4
%mul9.us = mul nsw i32 %0, %scale
-; CHECK: getelementptr inbounds i32, i32* %C, i32 %[[OUTER_IV]]
- %arrayidx13.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- store i32 %mul9.us, i32* %arrayidx13.us, align 4
+; CHECK: getelementptr inbounds i32, ptr %C, i32 %[[OUTER_IV]]
+ %arrayidx13.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ store i32 %mul9.us, ptr %arrayidx13.us, align 4
%inc.us = add nuw nsw i32 %j.029.us, 1
%exitcond = icmp ne i32 %inc.us, %conv
br i1 %exitcond, label %for.body7.us, label %for.cond2.for.cond.cleanup6_crit_edge.us
; CHECK-LABEL: test7
; Various inner phis and conditions which we can still work with
-define signext i16 @test7(i32 %I, i32 %J, i32* nocapture readonly %C, i16 signext %limit) {
+define signext i16 @test7(i32 %I, i32 %J, ptr nocapture readonly %C, i16 signext %limit) {
entry:
%cmp43 = icmp eq i32 %J, 0
br i1 %cmp43, label %for.end17, label %for.body.lr.ph
%prev.138.us = phi i32 [ %prev.045.us, %for.body.us ], [ %0, %if.end.us ]
%tmp.137.us = phi i32 [ %tmp.044.us, %for.body.us ], [ %tmp.2.us, %if.end.us ]
%add.us = add i32 %j.040.us, %mul.us
- %arrayidx.us = getelementptr inbounds i32, i32* %C, i32 %add.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %C, i32 %add.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add4.us = add nsw i32 %0, %tmp.137.us
%cmp5.us = icmp sgt i32 %add4.us, %conv
br i1 %cmp5.us, label %if.then.us, label %if.else.us
; CHECK: %prev.138.us = phi i32 [ %prev.045.us, %for.body.us ]
; CHECK: %tmp.137.us = phi i32 [ %tmp.044.us, %for.body.us ]
; CHECK: %add.us = add i32 %j.040.us, %mul.us
-; CHECK: %arrayidx.us = getelementptr inbounds i32, i32* %C, i32 %i.047.us
-; CHECK: %0 = load i32, i32* %arrayidx.us, align 4
+; CHECK: %arrayidx.us = getelementptr inbounds i32, ptr %C, i32 %i.047.us
+; CHECK: %0 = load i32, ptr %arrayidx.us, align 4
; CHECK: %add4.us = add nsw i32 %0, %tmp.137.us
; CHECK: %cmp5.us = icmp sgt i32 %add4.us, %conv
; CHECK: br i1 %cmp5.us, label %if.then.us, label %if.else.us
; CHECK-LABEL: test8
; Same as test1, but with different continue block order
; (uses icmp eq and loops on false)
-define i32 @test8(i32 %val, i16* nocapture %A) {
+define i32 @test8(i32 %val, ptr nocapture %A) {
entry:
br label %for.body
; CHECK: entry:
for.body3: ; preds = %for.body, %for.body3
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.body3 ]
%add = add nuw nsw i32 %j.017, %mul
- %arrayidx = getelementptr inbounds i16, i16* %A, i32 %add
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %A, i32 %add
+ %0 = load i16, ptr %arrayidx, align 2
%conv16 = zext i16 %0 to i32
%add4 = add i32 %conv16, %val
%conv5 = trunc i32 %add4 to i16
- store i16 %conv5, i16* %arrayidx, align 2
+ store i16 %conv5, ptr %arrayidx, align 2
%inc = add nuw nsw i32 %j.017, 1
%exitcond = icmp eq i32 %inc, 20
br i1 %exitcond, label %for.inc6, label %for.body3
; CHECK: for.body3:
; CHECK: %j.017 = phi i32 [ 0, %for.body ]
; CHECK: %add = add nuw nsw i32 %j.017, %mul
-; CHECK: %arrayidx = getelementptr inbounds i16, i16* %A, i32 %i.018
-; CHECK: %0 = load i16, i16* %arrayidx, align 2
+; CHECK: %arrayidx = getelementptr inbounds i16, ptr %A, i32 %i.018
+; CHECK: %0 = load i16, ptr %arrayidx, align 2
; CHECK: %conv16 = zext i16 %0 to i32
; CHECK: %add4 = add i32 %conv16, %val
; CHECK: %conv5 = trunc i32 %add4 to i16
-; CHECK: store i16 %conv5, i16* %arrayidx, align 2
+; CHECK: store i16 %conv5, ptr %arrayidx, align 2
; CHECK: %inc = add nuw nsw i32 %j.017, 1
; CHECK: %exitcond = icmp eq i32 %inc, 20
; CHECK: br label %for.inc6
; match the pattern (OuterPHI * InnerTripCount) + InnerPHI but
; we should still flatten the loop as the compare is removed
; later anyway.
-define i32 @test9(i32* nocapture %A) {
+define i32 @test9(ptr nocapture %A) {
entry:
br label %for.cond1.preheader
; CHECK-LABEL: test9
for.body4:
%j.016 = phi i32 [ 0, %for.cond1.preheader ], [ %inc, %for.body4 ]
%add = add i32 %j.016, %mul
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 30, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 30, ptr %arrayidx, align 4
%inc = add nuw nsw i32 %j.016, 1
%cmp2 = icmp ult i32 %j.016, 19
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
; CHECK: for.body4
; CHECK: %j.016 = phi i32 [ 0, %for.cond1.preheader ]
; CHECK: %add = add i32 %j.016, %mul
-; CHECK: %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.017
-; CHECK: store i32 30, i32* %arrayidx, align 4
+; CHECK: %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.017
+; CHECK: store i32 30, ptr %arrayidx, align 4
; CHECK: %inc = add nuw nsw i32 %j.016, 1
; CHECK: %cmp2 = icmp ult i32 %j.016, 19
; CHECK: br label %for.cond.cleanup3
for.cond.cleanup:
- %0 = load i32, i32* %A, align 4
+ %0 = load i32, ptr %A, align 4
ret i32 %0
}
; DONTWIDEN-NOT: %flatten.trunciv
; Function Attrs: nounwind
-define void @foo(i32* %A, i32 %N, i32 %M) {
+define void @foo(ptr %A, i32 %N, i32 %M) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP17:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[INDVAR]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add nsw i32 [[TMP3]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[FLATTEN_TRUNCIV]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: tail call void @f(i32* [[ARRAYIDX_US]])
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: tail call void @f(ptr [[ARRAYIDX_US]])
; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[CMP2_US:%.*]] = icmp slt i64 [[INDVAR_NEXT]], [[TMP0]]
; CHECK-NEXT: br label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]]
; DONTWIDEN-NEXT: [[J_016_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ]
; DONTWIDEN-NEXT: [[ADD_US:%.*]] = add nsw i32 [[J_016_US]], [[MUL_US]]
; DONTWIDEN-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[ADD_US]] to i64
-; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; DONTWIDEN-NEXT: tail call void @f(i32* [[ARRAYIDX_US]])
+; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; DONTWIDEN-NEXT: tail call void @f(ptr [[ARRAYIDX_US]])
; DONTWIDEN-NEXT: [[INC_US]] = add nuw nsw i32 [[J_016_US]], 1
; DONTWIDEN-NEXT: [[CMP2_US:%.*]] = icmp slt i32 [[INC_US]], [[M]]
; DONTWIDEN-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]]
%j.016.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
%add.us = add nsw i32 %j.016.us, %mul.us
%idxprom.us = sext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us
- tail call void @f(i32* %arrayidx.us) #2
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us
+ tail call void @f(ptr %arrayidx.us) #2
%inc.us = add nuw nsw i32 %j.016.us, 1
%cmp2.us = icmp slt i32 %inc.us, %M
br i1 %cmp2.us, label %for.body4.us, label %for.cond1.for.cond.cleanup3_crit_edge.us
; CHECK-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[ADD_US:%.*]] = add nsw i32 [[J_016_US]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[FLATTEN_TRUNCIV]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVAR2]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVAR2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: tail call void @g(i32 [[TMP8]])
; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[INC_US:%.*]] = add nuw nsw i32 [[J_016_US]], 1
; DONTWIDEN-NEXT: [[J_016_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ]
; DONTWIDEN-NEXT: [[ADD_US:%.*]] = add nsw i32 [[J_016_US]], [[MUL_US]]
; DONTWIDEN-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[ADD_US]] to i64
-; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
+; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; DONTWIDEN-NEXT: tail call void @g(i32 [[TMP0]])
; DONTWIDEN-NEXT: [[INC_US]] = add nuw nsw i32 [[J_016_US]], 1
; DONTWIDEN-NEXT: [[CMP2_US:%.*]] = icmp slt i32 [[INC_US]], [[M]]
%j.016.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
%add.us = add nsw i32 %j.016.us, %mul.us
%idxprom.us = sext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us
+ %0 = load i32, ptr %arrayidx.us, align 4
tail call void @g(i32 %0)
%inc.us = add nuw nsw i32 %j.016.us, 1
%cmp2.us = icmp slt i32 %inc.us, %M
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[INDVAR]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add i32 [[TMP3]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[FLATTEN_TRUNCIV]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: tail call void @g(i32 [[TMP4]])
; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[CMP2_US:%.*]] = icmp ult i64 [[INDVAR_NEXT]], [[TMP0]]
; DONTWIDEN-NEXT: [[J_016_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ]
; DONTWIDEN-NEXT: [[ADD_US:%.*]] = add i32 [[J_016_US]], [[MUL_US]]
; DONTWIDEN-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[ADD_US]] to i64
-; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
+; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4
; DONTWIDEN-NEXT: tail call void @g(i32 [[TMP0]])
; DONTWIDEN-NEXT: [[INC_US]] = add nuw i32 [[J_016_US]], 1
; DONTWIDEN-NEXT: [[CMP2_US:%.*]] = icmp ult i32 [[INC_US]], [[M]]
%j.016.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
%add.us = add i32 %j.016.us, %mul.us
%idxprom.us = zext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us
+ %0 = load i32, ptr %arrayidx.us, align 4
tail call void @g(i32 %0)
%inc.us = add nuw i32 %j.016.us, 1
%cmp2.us = icmp ult i32 %inc.us, %M
ret void
}
-define void @zext(i32 %N, i16* nocapture %A, i16 %val) {
+define void @zext(i32 %N, ptr nocapture %A, i16 %val) {
; CHECK-LABEL: @zext(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP20_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[INDVAR]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add i32 [[TMP3]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[FLATTEN_TRUNCIV]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX_US]], align 2
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_US]], align 2
; CHECK-NEXT: [[ADD5_US:%.*]] = add i16 [[TMP4]], [[VAL:%.*]]
-; CHECK-NEXT: store i16 [[ADD5_US]], i16* [[ARRAYIDX_US]], align 2
+; CHECK-NEXT: store i16 [[ADD5_US]], ptr [[ARRAYIDX_US]], align 2
; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[CMP2_US:%.*]] = icmp ult i64 [[INDVAR_NEXT]], [[TMP0]]
; CHECK-NEXT: br label [[FOR_COND1_FOR_INC7_CRIT_EDGE_US]]
; DONTWIDEN-NEXT: [[J_019_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY3_US]] ]
; DONTWIDEN-NEXT: [[ADD_US:%.*]] = add i32 [[J_019_US]], [[MUL_US]]
; DONTWIDEN-NEXT: [[IDXPROM_US:%.*]] = zext i32 [[ADD_US]] to i64
-; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[IDXPROM_US]]
-; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i16, i16* [[ARRAYIDX_US]], align 2
+; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; DONTWIDEN-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX_US]], align 2
; DONTWIDEN-NEXT: [[ADD5_US:%.*]] = add i16 [[TMP0]], [[VAL:%.*]]
-; DONTWIDEN-NEXT: store i16 [[ADD5_US]], i16* [[ARRAYIDX_US]], align 2
+; DONTWIDEN-NEXT: store i16 [[ADD5_US]], ptr [[ARRAYIDX_US]], align 2
; DONTWIDEN-NEXT: [[INC_US]] = add nuw i32 [[J_019_US]], 1
; DONTWIDEN-NEXT: [[CMP2_US:%.*]] = icmp ult i32 [[INC_US]], [[N]]
; DONTWIDEN-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY3_US]], label [[FOR_COND1_FOR_INC7_CRIT_EDGE_US]]
%j.019.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body3.us ]
%add.us = add i32 %j.019.us, %mul.us
%idxprom.us = zext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i16, i16* %A, i64 %idxprom.us
- %0 = load i16, i16* %arrayidx.us, align 2
+ %arrayidx.us = getelementptr inbounds i16, ptr %A, i64 %idxprom.us
+ %0 = load i16, ptr %arrayidx.us, align 2
%add5.us = add i16 %0, %val
- store i16 %add5.us, i16* %arrayidx.us, align 2
+ store i16 %add5.us, ptr %arrayidx.us, align 2
%inc.us = add nuw i32 %j.019.us, 1
%cmp2.us = icmp ult i32 %inc.us, %N
br i1 %cmp2.us, label %for.body3.us, label %for.cond1.for.inc7_crit_edge.us
; Same as @foo, but M is sext from i16. This used to assert because we thought
; this sext was from widening and try to look through it.
-define void @foo_M_sext(i32* %A, i32 %N, i16 %M) {
+define void @foo_M_sext(ptr %A, i32 %N, i16 %M) {
; CHECK-LABEL: @foo_M_sext(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[M2:%.*]] = sext i16 [[M:%.*]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[INDVAR]] to i32
; CHECK-NEXT: [[ADD_US:%.*]] = add nsw i32 [[TMP3]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[FLATTEN_TRUNCIV]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; CHECK-NEXT: tail call void @f(i32* [[ARRAYIDX_US]])
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; CHECK-NEXT: tail call void @f(ptr [[ARRAYIDX_US]])
; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[CMP2_US:%.*]] = icmp slt i64 [[INDVAR_NEXT]], [[TMP0]]
; CHECK-NEXT: br label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]]
; DONTWIDEN-NEXT: [[J_016_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ]
; DONTWIDEN-NEXT: [[ADD_US:%.*]] = add nsw i32 [[J_016_US]], [[MUL_US]]
; DONTWIDEN-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[ADD_US]] to i64
-; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
-; DONTWIDEN-NEXT: tail call void @f(i32* [[ARRAYIDX_US]])
+; DONTWIDEN-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IDXPROM_US]]
+; DONTWIDEN-NEXT: tail call void @f(ptr [[ARRAYIDX_US]])
; DONTWIDEN-NEXT: [[INC_US]] = add nuw nsw i32 [[J_016_US]], 1
; DONTWIDEN-NEXT: [[CMP2_US:%.*]] = icmp slt i32 [[INC_US]], [[M2]]
; DONTWIDEN-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]]
%j.016.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
%add.us = add nsw i32 %j.016.us, %mul.us
%idxprom.us = sext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i32, i32* %A, i64 %idxprom.us
- tail call void @f(i32* %arrayidx.us) #2
+ %arrayidx.us = getelementptr inbounds i32, ptr %A, i64 %idxprom.us
+ tail call void @f(ptr %arrayidx.us) #2
%inc.us = add nuw nsw i32 %j.016.us, 1
%cmp2.us = icmp slt i32 %inc.us, %M2
br i1 %cmp2.us, label %for.body4.us, label %for.cond1.for.cond.cleanup3_crit_edge.us
declare dso_local i32 @use_64(i64)
declare dso_local void @g(i32)
-declare dso_local void @f(i32* %0) local_unnamed_addr #1
+declare dso_local void @f(ptr %0) local_unnamed_addr #1
@d = dso_local global i32 0, align 4
@b = internal global i32 0, align 4
@a = internal global i32 0, align 4
-@c = dso_local global i32* null, align 8
+@c = dso_local global ptr null, align 8
define dso_local i32 @fn1() local_unnamed_addr #0 {
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @d, align 4
-; CHECK-NEXT: store i32 [[TMP0]], i32* @b, align 4
-; CHECK-NEXT: store i32 [[TMP0]], i32* @a, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @d, align 4
+; CHECK-NEXT: store i32 [[TMP0]], ptr @b, align 4
+; CHECK-NEXT: store i32 [[TMP0]], ptr @a, align 4
; CHECK-NEXT: [[CMP15:%.*]] = icmp sgt i32 [[TMP0]], 0
; CHECK-NEXT: br i1 [[CMP15]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]], label [[FOR_END6:%.*]]
; CHECK: for.cond1.preheader.us.preheader:
; CHECK: for.cond1.preheader.us:
; CHECK-NEXT: [[INDVAR2:%.*]] = phi i64 [ [[INDVAR_NEXT3:%.*]], [[FOR_COND1_FOR_INC4_CRIT_EDGE_US:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT: [[I_016_US:%.*]] = phi i32 [ [[INC5_US:%.*]], [[FOR_COND1_FOR_INC4_CRIT_EDGE_US]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** @c, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr @c, align 8
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw i64 [[INDVAR2]], [[TMP2]]
; CHECK-NEXT: [[MUL_US:%.*]] = mul nsw i32 [[I_016_US]], [[TMP0]]
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[MUL_US]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = add nsw i64 [[TMP8]], [[TMP5]]
; CHECK-NEXT: [[ADD_US:%.*]] = add nsw i32 [[J_014_US]], [[MUL_US]]
; CHECK-NEXT: [[IDXPROM_US:%.*]] = sext i32 [[ADD_US]] to i64
-; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 [[TMP7]]
-; CHECK-NEXT: store i32 32, i32* [[ARRAYIDX_US]], align 4
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP7]]
+; CHECK-NEXT: store i32 32, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[INC_US]] = add nuw nsw i32 [[J_014_US]], 1
; CHECK-NEXT: [[CMP2_US:%.*]] = icmp slt i64 [[INDVAR_NEXT]], [[TMP1]]
; CHECK-NEXT: ret i32 undef
;
entry:
- %0 = load i32, i32* @d, align 4
- store i32 %0, i32* @b, align 4
- store i32 %0, i32* @a, align 4
+ %0 = load i32, ptr @d, align 4
+ store i32 %0, ptr @b, align 4
+ store i32 %0, ptr @a, align 4
%cmp15 = icmp sgt i32 %0, 0
br i1 %cmp15, label %for.cond1.preheader.us.preheader, label %for.end6
for.cond1.preheader.us:
%i.016.us = phi i32 [ %inc5.us, %for.cond1.for.inc4_crit_edge.us ], [ 0, %for.cond1.preheader.us.preheader ]
- %1 = load i32*, i32** @c, align 8
+ %1 = load ptr, ptr @c, align 8
%mul.us = mul nsw i32 %i.016.us, %0
br label %for.body3.us
%j.014.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body3.us ]
%add.us = add nsw i32 %j.014.us, %mul.us
%idxprom.us = sext i32 %add.us to i64
- %arrayidx.us = getelementptr inbounds i32, i32* %1, i64 %idxprom.us
- store i32 32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %1, i64 %idxprom.us
+ store i32 32, ptr %arrayidx.us, align 4
%inc.us = add nuw nsw i32 %j.014.us, 1
%cmp2.us = icmp slt i32 %inc.us, %0
br i1 %cmp2.us, label %for.body3.us, label %for.cond1.for.inc4_crit_edge.us
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[INDVAR]], [[TMP0]]
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i16 [[J_011]], [[MUL]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i16], [64 x i16]* @v, i16 0, i16 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i16], ptr @v, i16 0, i16 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ADD5]] = add nsw i16 [[TMP4]], [[SUM_110]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1
; CHECK-NEXT: [[INC]] = add nuw nsw i16 [[J_011]], 1
%j.011 = phi i16 [ 0, %for.cond1.preheader ], [ %inc, %for.body4 ]
%sum.110 = phi i16 [ %sum.012, %for.cond1.preheader ], [ %add5, %for.body4 ]
%add = add nuw nsw i16 %j.011, %mul
- %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @v, i16 0, i16 %add
- %0 = load i16, i16* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds [64 x i16], ptr @v, i16 0, i16 %add
+ %0 = load i16, ptr %arrayidx, align 1
%add5 = add nsw i16 %0, %sum.110
%inc = add nuw nsw i16 %j.011, 1
%exitcond.not = icmp eq i16 %inc, 16
; CHECK: bb20.preheader
; CHECK: ****************************
; CHECK: Loop Fusion complete
-define void @non_cfe(i32* noalias %arg, i32 %N) {
+define void @non_cfe(ptr noalias %arg, i32 %N) {
bb:
br label %bb7
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv23 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv23
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv23
+ store i32 %tmp12, ptr %tmp13, align 4
br label %bb14
bb14: ; preds = %bb7
br i1 %cmp, label %bb16, label %bb33
bb16: ; preds = %bb34
- %tmp17 = load i32, i32* %arg, align 4
+ %tmp17 = load i32, ptr %arg, align 4
%tmp18 = icmp slt i32 %tmp17, 0
br i1 %tmp18, label %bb20.preheader, label %bb33
%tmp26 = mul nsw i32 %tmp23, %tmp25
%tmp27 = trunc i64 %indvars.iv1 to i32
%tmp28 = srem i32 %tmp26, %tmp27
- %tmp29 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp28, i32* %tmp29, align 4
+ %tmp29 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp28, ptr %tmp29, align 4
br label %bb30
bb30: ; preds = %bb22
; CHECK-NEXT: [[LOOP2PREHEADER]]
; CHECK: Fusion candidates are not adjacent. Not fusing.
; CHECK: Loop Fusion complete
-define void @non_adjacent(i32* noalias %arg) {
+define void @non_adjacent(ptr noalias %arg) {
bb:
br label %bb5
%tmp7 = mul nsw i64 %tmp, %tmp6
%tmp8 = srem i64 %tmp7, %.013
%tmp9 = trunc i64 %tmp8 to i32
- %tmp10 = getelementptr inbounds i32, i32* %arg, i64 %.013
- store i32 %tmp9, i32* %tmp10, align 4
+ %tmp10 = getelementptr inbounds i32, ptr %arg, i64 %.013
+ store i32 %tmp9, ptr %tmp10, align 4
br label %bb11
bb11: ; preds = %bb5
%tmp19 = mul nsw i64 %tmp17, %tmp18
%tmp20 = srem i64 %tmp19, %.02
%tmp21 = trunc i64 %tmp20 to i32
- %tmp22 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %.02
- store i32 %tmp21, i32* %tmp22, align 4
+ %tmp22 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %.02
+ store i32 %tmp21, ptr %tmp22, align 4
br label %bb23
bb23: ; preds = %bb16
; CHECK-NEXT: [[LOOP2PREHEADER]]
; CHECK: Fusion candidates do not have identical trip counts. Not fusing.
; CHECK: Loop Fusion complete
-define void @different_bounds(i32* noalias %arg) {
+define void @different_bounds(ptr noalias %arg) {
bb:
br label %bb5
%tmp7 = mul nsw i64 %tmp, %tmp6
%tmp8 = srem i64 %tmp7, %.013
%tmp9 = trunc i64 %tmp8 to i32
- %tmp10 = getelementptr inbounds i32, i32* %arg, i64 %.013
- store i32 %tmp9, i32* %tmp10, align 4
+ %tmp10 = getelementptr inbounds i32, ptr %arg, i64 %.013
+ store i32 %tmp9, ptr %tmp10, align 4
br label %bb11
bb11: ; preds = %bb5
%tmp19 = mul nsw i64 %tmp17, %tmp18
%tmp20 = srem i64 %tmp19, %.02
%tmp21 = trunc i64 %tmp20 to i32
- %tmp22 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %.02
- store i32 %tmp21, i32* %tmp22, align 4
+ %tmp22 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %.02
+ store i32 %tmp21, ptr %tmp22, align 4
br label %bb23
bb23: ; preds = %bb16
; CHECK-NEXT: [[LOOP2PREHEADER]]
; CHECK: Memory dependencies do not allow fusion!
; CHECK: Loop Fusion complete
-define void @negative_dependence(i32* noalias %arg) {
+define void @negative_dependence(ptr noalias %arg) {
bb:
br label %bb7
bb7: ; preds = %bb, %bb9
%indvars.iv22 = phi i64 [ 0, %bb ], [ %indvars.iv.next3, %bb9 ]
- %tmp = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv22
+ %tmp = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv22
%tmp8 = trunc i64 %indvars.iv22 to i32
- store i32 %tmp8, i32* %tmp, align 4
+ store i32 %tmp8, ptr %tmp, align 4
br label %bb9
bb9: ; preds = %bb7
bb13: ; preds = %bb11.preheader, %bb18
%indvars.iv1 = phi i64 [ 0, %bb11.preheader ], [ %indvars.iv.next, %bb18 ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv1, 1
- %tmp14 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv.next
- %tmp15 = load i32, i32* %tmp14, align 4
+ %tmp14 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv.next
+ %tmp15 = load i32, ptr %tmp14, align 4
%tmp16 = shl nsw i32 %tmp15, 1
- %tmp17 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp16, i32* %tmp17, align 4
+ %tmp17 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp16, ptr %tmp17, align 4
br label %bb18
bb18: ; preds = %bb13
; CHECK-NEXT: [[LOOP2PREHEADER]]
; CHECK: Memory dependencies do not allow fusion!
; CHECK: Loop Fusion complete
-define i32 @sumTest(i32* noalias %arg) {
+define i32 @sumTest(ptr noalias %arg) {
bb:
br label %bb9
%.01.lcssa = phi i32 [ 0, %bb ], [ %tmp11, %bb9 ]
%.013 = phi i32 [ 0, %bb ], [ %tmp11, %bb9 ]
%indvars.iv32 = phi i64 [ 0, %bb ], [ %indvars.iv.next4, %bb9 ]
- %tmp = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv32
- %tmp10 = load i32, i32* %tmp, align 4
+ %tmp = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv32
+ %tmp10 = load i32, ptr %tmp, align 4
%tmp11 = add nsw i32 %.013, %tmp10
%indvars.iv.next4 = add nuw nsw i64 %indvars.iv32, 1
%exitcond5 = icmp ne i64 %indvars.iv.next4, 100
bb15: ; preds = %bb13.preheader, %bb20
%indvars.iv1 = phi i64 [ 0, %bb13.preheader ], [ %indvars.iv.next, %bb20 ]
- %tmp16 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv1
- %tmp17 = load i32, i32* %tmp16, align 4
+ %tmp16 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv1
+ %tmp17 = load i32, ptr %tmp16, align 4
%tmp18 = sdiv i32 %tmp17, %.01.lcssa
- %tmp19 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp18, i32* %tmp19, align 4
+ %tmp19 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp18, ptr %tmp19, align 4
br label %bb20
bb20: ; preds = %bb15
; CHECK-NEXT: [[LOOP2PREHEADER]]
; CHECK: Memory dependencies do not allow fusion!
; CHECK: Loop Fusion complete
-define float @test(float* nocapture %a, i32 %n) {
+define float @test(ptr nocapture %a, i32 %n) {
entry:
%conv = zext i32 %n to i64
%cmp32 = icmp eq i32 %n, 0
%i.034 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%sum1.033 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
%idxprom = trunc i64 %i.034 to i32
- %arrayidx = getelementptr inbounds float, float* %a, i32 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %a, i32 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%add = fadd float %sum1.033, %0
%inc = add nuw nsw i64 %i.034, 1
%cmp = icmp ult i64 %inc, %conv
for.body8: ; preds = %for.body, %for.body8
%i2.031 = phi i64 [ %inc14, %for.body8 ], [ 0, %for.body ]
%idxprom9 = trunc i64 %i2.031 to i32
- %arrayidx10 = getelementptr inbounds float, float* %a, i32 %idxprom9
- %1 = load float, float* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds float, ptr %a, i32 %idxprom9
+ %1 = load float, ptr %arrayidx10, align 4
%div = fdiv float %1, %add
- store float %div, float* %arrayidx10, align 4
+ store float %div, ptr %arrayidx10, align 4
%inc14 = add nuw nsw i64 %i2.031, 1
%cmp5 = icmp ult i64 %inc14, %conv
br i1 %cmp5, label %for.body8, label %for.cond.cleanup7
; CHECK: Performing Loop Fusion on function notRotated
; CHECK: Loop bb{{.*}} is not rotated!
; CHECK: Loop bb{{.*}} is not rotated!
-define void @notRotated(i32* noalias %arg) {
+define void @notRotated(ptr noalias %arg) {
bb:
br label %bb5
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv2 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv2
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv2
+ store i32 %tmp12, ptr %tmp13, align 4
br label %bb14
bb14: ; preds = %bb7
%tmp23 = mul nsw i32 %tmp20, %tmp22
%tmp24 = trunc i64 %indvars.iv to i32
%tmp25 = srem i32 %tmp23, %tmp24
- %tmp26 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv
- store i32 %tmp25, i32* %tmp26, align 4
+ %tmp26 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv
+ store i32 %tmp25, ptr %tmp26, align 4
br label %bb27
bb27: ; preds = %bb19
; CHECK: remark: diagnostics_analysis.c:6:3: [test]: Loop is not a candidate for fusion: Loop contains a volatile access
; CHECK: remark: diagnostics_analysis.c:10:3: [test]: Loop is not a candidate for fusion: Loop has unknown trip count
-define void @test(i32* %A, i32 %n) !dbg !15 {
+define void @test(ptr %A, i32 %n) !dbg !15 {
entry:
- %A.addr = alloca i32*, align 8
+ %A.addr = alloca ptr, align 8
%n.addr = alloca i32, align 4
%i = alloca i32, align 4
%i1 = alloca i32, align 4
- store i32* %A, i32** %A.addr, align 8
- store i32 %n, i32* %n.addr, align 4
- %0 = bitcast i32* %i to i8*
- store i32 0, i32* %i, align 4
+ store ptr %A, ptr %A.addr, align 8
+ store i32 %n, ptr %n.addr, align 4
+ store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32, i32* %i, align 4
- %2 = load i32, i32* %n.addr, align 4
- %cmp = icmp slt i32 %1, %2
+ %0 = load i32, ptr %i, align 4
+ %1 = load i32, ptr %n.addr, align 4
+ %cmp = icmp slt i32 %0, %1
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- %3 = bitcast i32* %i to i8*, !dbg !42
br label %for.end
for.body: ; preds = %for.cond
- %4 = load i32, i32* %i, align 4
- %sub = sub nsw i32 %4, 3
- %5 = load i32, i32* %i, align 4
- %add = add nsw i32 %5, 3
+ %2 = load i32, ptr %i, align 4
+ %sub = sub nsw i32 %2, 3
+ %3 = load i32, ptr %i, align 4
+ %add = add nsw i32 %3, 3
%mul = mul nsw i32 %sub, %add
- %6 = load i32, i32* %i, align 4
- %rem = srem i32 %mul, %6
- %7 = load i32*, i32** %A.addr, align 8
- %8 = load i32, i32* %i, align 4
- %idxprom = sext i32 %8 to i64
- %arrayidx = getelementptr inbounds i32, i32* %7, i64 %idxprom
- store volatile i32 %rem, i32* %arrayidx, align 4
+ %4 = load i32, ptr %i, align 4
+ %rem = srem i32 %mul, %4
+ %5 = load ptr, ptr %A.addr, align 8
+ %6 = load i32, ptr %i, align 4
+ %idxprom = sext i32 %6 to i64
+ %arrayidx = getelementptr inbounds i32, ptr %5, i64 %idxprom
+ store volatile i32 %rem, ptr %arrayidx, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %9 = load i32, i32* %i, align 4, !dbg !49
- %inc = add nsw i32 %9, 1, !dbg !49
- store i32 %inc, i32* %i, align 4, !dbg !49
+ %7 = load i32, ptr %i, align 4, !dbg !49
+ %inc = add nsw i32 %7, 1, !dbg !49
+ store i32 %inc, ptr %i, align 4, !dbg !49
br label %for.cond, !dbg !42, !llvm.loop !50
for.end: ; preds = %for.cond.cleanup
- %10 = bitcast i32* %i1 to i8*
- store i32 0, i32* %i1, align 4
+ store i32 0, ptr %i1, align 4
br label %for.cond2
for.cond2: ; preds = %for.inc12, %for.end
- %11 = load i32, i32* %i1, align 4
- %12 = load i32, i32* %n.addr, align 4
- %cmp3 = icmp slt i32 %11, %12
+ %8 = load i32, ptr %i1, align 4
+ %9 = load i32, ptr %n.addr, align 4
+ %cmp3 = icmp slt i32 %8, %9
br i1 %cmp3, label %for.body5, label %for.cond.cleanup4
for.cond.cleanup4: ; preds = %for.cond2
- %13 = bitcast i32* %i1 to i8*
br label %for.end14
for.body5: ; preds = %for.cond2
- %14 = load i32, i32* %i1, align 4
- %sub6 = sub nsw i32 %14, 3
- %15 = load i32, i32* %i1, align 4
- %add7 = add nsw i32 %15, 3
+ %10 = load i32, ptr %i1, align 4
+ %sub6 = sub nsw i32 %10, 3
+ %11 = load i32, ptr %i1, align 4
+ %add7 = add nsw i32 %11, 3
%mul8 = mul nsw i32 %sub6, %add7
- %16 = load i32, i32* %i1, align 4
- %rem9 = srem i32 %mul8, %16
- %17 = load i32, i32* %i1, align 4
- %idxprom10 = sext i32 %17 to i64
- %arrayidx11 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %idxprom10
- store i32 %rem9, i32* %arrayidx11, align 4
+ %12 = load i32, ptr %i1, align 4
+ %rem9 = srem i32 %mul8, %12
+ %13 = load i32, ptr %i1, align 4
+ %idxprom10 = sext i32 %13 to i64
+ %arrayidx11 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %idxprom10
+ store i32 %rem9, ptr %arrayidx11, align 4
br label %for.inc12
for.inc12: ; preds = %for.body5
- %18 = load i32, i32* %i1, align 4
- %inc13 = add nsw i32 %18, 1
- store i32 %inc13, i32* %i1, align 4
+ %14 = load i32, ptr %i1, align 4
+ %inc13 = add nsw i32 %14, 1
+ store i32 %inc13, ptr %i1, align 4
br label %for.cond2, !dbg !59, !llvm.loop !67
for.end14: ; preds = %for.cond.cleanup4
@B = common global [1024 x i32] zeroinitializer, align 16, !dbg !0
; CHECK: remark: diagnostics_missed.c:18:3: [non_adjacent]: entry and for.end: Loops are not adjacent
-define void @non_adjacent(i32* noalias %A) !dbg !14 {
+define void @non_adjacent(ptr noalias %A) !dbg !14 {
entry:
br label %for.body
%mul = mul nsw i64 %sub, %add
%rem = srem i64 %mul, %i.02
%conv = trunc i64 %rem to i32
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.02
+ store i32 %conv, ptr %arrayidx, align 4
br label %for.inc
for.inc: ; preds = %for.body
%mul9 = mul nsw i64 %sub7, %add8
%rem10 = srem i64 %mul9, %i1.01
%conv11 = trunc i64 %rem10 to i32
- %arrayidx12 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %i1.01
- store i32 %conv11, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %i1.01
+ store i32 %conv11, ptr %arrayidx12, align 4
br label %for.inc13
for.inc13: ; preds = %for.body6
}
; CHECK: remark: diagnostics_missed.c:28:3: [different_bounds]: entry and for.end: Loop trip counts are not the same
-define void @different_bounds(i32* noalias %A) !dbg !36 {
+define void @different_bounds(ptr noalias %A) !dbg !36 {
entry:
br label %for.body
%mul = mul nsw i64 %sub, %add
%rem = srem i64 %mul, %i.02
%conv = trunc i64 %rem to i32
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.02
+ store i32 %conv, ptr %arrayidx, align 4
br label %for.inc
for.inc: ; preds = %for.body
%mul9 = mul nsw i64 %sub7, %add8
%rem10 = srem i64 %mul9, %i1.01
%conv11 = trunc i64 %rem10 to i32
- %arrayidx12 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %i1.01
- store i32 %conv11, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %i1.01
+ store i32 %conv11, ptr %arrayidx12, align 4
br label %for.inc13
for.inc13: ; preds = %for.body6
}
; CHECK: remark: diagnostics_missed.c:38:3: [negative_dependence]: entry and for.end: Dependencies prevent fusion
-define void @negative_dependence(i32* noalias %A) !dbg !51 {
+define void @negative_dependence(ptr noalias %A) !dbg !51 {
entry:
br label %for.body
for.body: ; preds = %entry, %for.inc
%indvars.iv13 = phi i64 [ 0, %entry ], [ %indvars.iv.next2, %for.inc ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv13
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv13
%tmp = trunc i64 %indvars.iv13 to i32
- store i32 %tmp, i32* %arrayidx, align 4
+ store i32 %tmp, ptr %arrayidx, align 4
br label %for.inc
for.inc: ; preds = %for.body
for.body5: ; preds = %for.end, %for.inc10
%indvars.iv2 = phi i64 [ 0, %for.end ], [ %indvars.iv.next, %for.inc10 ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
- %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %tmp4 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %tmp4 = load i32, ptr %arrayidx7, align 4
%mul = shl nsw i32 %tmp4, 1
- %arrayidx9 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv2
- store i32 %mul, i32* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv2
+ store i32 %mul, ptr %arrayidx9, align 4
br label %for.inc10
for.inc10: ; preds = %for.body5
}
; CHECK: remark: diagnostics_missed.c:51:3: [sumTest]: entry and for.cond2.preheader: Dependencies prevent fusion
-define i32 @sumTest(i32* noalias %A) !dbg !63 {
+define i32 @sumTest(ptr noalias %A) !dbg !63 {
entry:
br label %for.body
br label %for.inc
for.inc: ; preds = %for.body
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv13
- %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv13
+ %tmp = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %sum.04, %tmp
%indvars.iv.next2 = add nuw nsw i64 %indvars.iv13, 1
%exitcond3 = icmp ne i64 %indvars.iv.next2, 100
for.body5: ; preds = %for.cond2.preheader, %for.inc10
%indvars.iv2 = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next, %for.inc10 ]
- %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv2
- %tmp4 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv2
+ %tmp4 = load i32, ptr %arrayidx7, align 4
%div = sdiv i32 %tmp4, %add
- %arrayidx9 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv2
- store i32 %div, i32* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv2
+ store i32 %div, ptr %arrayidx9, align 4
br label %for.inc10
for.inc10: ; preds = %for.body5
declare void @llvm.dbg.value(metadata, metadata, metadata) #0
; CHECK: remark: diagnostics_missed.c:62:3: [unsafe_preheader]: for.first.preheader and for.second.preheader: Loop has a non-empty preheader with instructions that cannot be moved
-define void @unsafe_preheader(i32* noalias %A, i32* noalias %B) {
+define void @unsafe_preheader(ptr noalias %A, ptr noalias %B) {
for.first.preheader:
br label %for.first, !dbg !80
for.first:
%i.02 = phi i64 [ 0, %for.first.preheader ], [ %inc, %for.first ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.02
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.02, 1
%cmp = icmp slt i64 %inc, 100
br i1 %cmp, label %for.first, label %for.second.preheader
for.second:
%j.01 = phi i64 [ 0, %for.second.preheader ], [ %inc6, %for.second ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.01
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.01
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.01, 1
%cmp2 = icmp slt i64 %inc6, 100
br i1 %cmp2, label %for.second, label %for.end
}
; CHECK: remark: diagnostics_missed.c:67:3: [unsafe_exitblock]: for.first.preheader and for.second.preheader: Candidate has a non-empty exit block with instructions that cannot be moved
-define void @unsafe_exitblock(i32* noalias %A, i32* noalias %B, i64 %N) {
+define void @unsafe_exitblock(ptr noalias %A, ptr noalias %B, i64 %N) {
for.first.guard:
%cmp3 = icmp slt i64 0, %N
br i1 %cmp3, label %for.first.preheader, label %for.second.guard
for.first:
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.first, label %for.first.exit
for.second:
%j.02 = phi i64 [ %inc6, %for.second ], [ 0, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp2 = icmp slt i64 %inc6, %N
br i1 %cmp2, label %for.second, label %for.second.exit
}
; CHECK: remark: diagnostics_missed.c:72:3: [unsafe_guardblock]: for.first.preheader and for.second.preheader: Candidate has a non-empty guard block with instructions that cannot be moved
-define void @unsafe_guardblock(i32* noalias %A, i32* noalias %B, i64 %N) {
+define void @unsafe_guardblock(ptr noalias %A, ptr noalias %B, i64 %N) {
for.first.guard:
%cmp3 = icmp slt i64 0, %N
br i1 %cmp3, label %for.first.preheader, label %for.second.guard
for.first:
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.first, label %for.first.exit
for.second:
%j.02 = phi i64 [ %inc6, %for.second ], [ 0, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp2 = icmp slt i64 %inc6, %N
br i1 %cmp2, label %for.second, label %for.second.exit
inner1.body:
%iv70 = phi i64 [ %iv.next71, %inner1.body ], [ 0, %inner1.ph ]
- %idx6 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @a, i64 0, i64 %iv74, i64 %iv70
- %0 = load i32, i32* %idx6
+ %idx6 = getelementptr inbounds [10 x [10 x i32]], ptr @a, i64 0, i64 %iv74, i64 %iv70
+ %0 = load i32, ptr %idx6
%add = add nsw i32 %0, 2
- %idx10 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @b, i64 0, i64 %iv74, i64 %iv70
- store i32 %add, i32* %idx10
+ %idx10 = getelementptr inbounds [10 x [10 x i32]], ptr @b, i64 0, i64 %iv74, i64 %iv70
+ store i32 %add, ptr %idx10
%iv.next71 = add nuw nsw i64 %iv70, 1
%exitcond73 = icmp eq i64 %iv.next71, %wide.trip.count72
br i1 %exitcond73, label %inner1.exit, label %inner1.body
inner2.body:
%iv = phi i64 [ %iv.next, %inner2.body ], [ 0, %inner2.ph ]
- %idx27 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @a, i64 0, i64 %iv66, i64 %iv
- %1 = load i32, i32* %idx27
+ %idx27 = getelementptr inbounds [10 x [10 x i32]], ptr @a, i64 0, i64 %iv66, i64 %iv
+ %1 = load i32, ptr %idx27
%mul = shl nsw i32 %1, 1
- %idx31 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %iv66, i64 %iv
- store i32 %mul, i32* %idx31
+ %idx31 = getelementptr inbounds [10 x [10 x i32]], ptr @c, i64 0, i64 %iv66, i64 %iv
+ store i32 %mul, ptr %idx31
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %wide.trip.count72
br i1 %exitcond, label %inner2.exit, label %inner2.body
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-fusion -S | FileCheck %s
-define void @v_5_0(i32* %dummy) {
+define void @v_5_0(ptr %dummy) {
; CHECK-LABEL: @v_5_0(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup5:
; CHECK-NEXT: ret void
; CHECK: for.body6:
-; CHECK-NEXT: [[V_LOOP_2_0_V_LOOP_2_0_V_LOOP_2_0_18:%.*]] = load volatile i32, i32* [[DUMMY:%.*]], align 1
+; CHECK-NEXT: [[V_LOOP_2_0_V_LOOP_2_0_V_LOOP_2_0_18:%.*]] = load volatile i32, ptr [[DUMMY:%.*]], align 1
; CHECK-NEXT: br label [[FOR_BODY6]]
;
entry:
ret void
for.body6: ; preds = %for.body6, %for.cond.cleanup
- %v_loop_2.0.v_loop_2.0.v_loop_2.0.18 = load volatile i32, i32* %dummy, align 1
+ %v_loop_2.0.v_loop_2.0.v_loop_2.0.18 = load volatile i32, ptr %dummy, align 1
br label %for.body6
}
-define void @f943(i8* %dummy) {
+define void @f943(ptr %dummy) {
; CHECK-LABEL: @f943(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND32_I_I:%.*]]
; CHECK: for.body37.i.i:
; CHECK-NEXT: br i1 true, label [[FOR_BODY37_I_I]], label [[FOR_COND42_PREHEADER_I_I:%.*]]
; CHECK: for.body44.i.i:
-; CHECK-NEXT: store volatile i8 poison, i8* [[DUMMY:%.*]], align 1
+; CHECK-NEXT: store volatile i8 poison, ptr [[DUMMY:%.*]], align 1
; CHECK-NEXT: br i1 true, label [[FOR_BODY44_I_I]], label [[FOR_END47_I_I_LOOPEXIT:%.*]]
; CHECK: for.end47.i.i.loopexit:
; CHECK-NEXT: br label [[FOR_END47_I_I]]
br i1 true, label %for.body37.i.i, label %for.cond42.preheader.i.i
for.body44.i.i: ; preds = %for.body44.i.i, %for.cond42.preheader.i.i
- store volatile i8 poison, i8* %dummy, align 1
+ store volatile i8 poison, ptr %dummy, align 1
br i1 true, label %for.body44.i.i, label %for.end47.i.i
for.end47.i.i: ; preds = %for.body44.i.i, %for.cond42.preheader.i.i
%tmp18 = mul nsw i32 %tmp, %tmp17
%tmp19 = trunc i64 %indvars.iv107 to i32
%tmp20 = srem i32 %tmp18, %tmp19
- %tmp21 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv107
- store i32 %tmp20, i32* %tmp21, align 4
+ %tmp21 = getelementptr inbounds [1024 x i32], ptr @A, i64 0, i64 %indvars.iv107
+ store i32 %tmp20, ptr %tmp21, align 4
br label %bb22
bb22: ; preds = %bb15
%tmp31 = mul nsw i32 %tmp28, %tmp30
%tmp32 = trunc i64 %indvars.iv75 to i32
%tmp33 = srem i32 %tmp31, %tmp32
- %tmp34 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv75
- store i32 %tmp33, i32* %tmp34, align 4
+ %tmp34 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv75
+ store i32 %tmp33, ptr %tmp34, align 4
br label %bb35
bb35: ; preds = %bb27
%tmp44 = mul nsw i32 %tmp41, %tmp43
%tmp45 = trunc i64 %indvars.iv43 to i32
%tmp46 = srem i32 %tmp44, %tmp45
- %tmp47 = getelementptr inbounds [1024 x i32], [1024 x i32]* @C, i64 0, i64 %indvars.iv43
- store i32 %tmp46, i32* %tmp47, align 4
+ %tmp47 = getelementptr inbounds [1024 x i32], ptr @C, i64 0, i64 %indvars.iv43
+ store i32 %tmp46, ptr %tmp47, align 4
br label %bb48
bb48: ; preds = %bb40
%tmp57 = mul nsw i32 %tmp54, %tmp56
%tmp58 = trunc i64 %indvars.iv1 to i32
%tmp59 = srem i32 %tmp57, %tmp58
- %tmp60 = getelementptr inbounds [1024 x i32], [1024 x i32]* @D, i64 0, i64 %indvars.iv1
- store i32 %tmp59, i32* %tmp60, align 4
+ %tmp60 = getelementptr inbounds [1024 x i32], ptr @D, i64 0, i64 %indvars.iv1
+ store i32 %tmp59, ptr %tmp60, align 4
br label %bb61
bb61: ; preds = %bb53
@B = common global [1024 x i32] zeroinitializer, align 16
-define void @dep_free_parametric(i32* noalias %A, i64 %N) {
+define void @dep_free_parametric(ptr noalias %A, i64 %N) {
; CHECK-LABEL: @dep_free_parametric(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP4:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[SUB]], [[ADD]]
; CHECK-NEXT: [[REM:%.*]] = srem i64 [[MUL]], [[I_05]]
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[REM]] to i32
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_05]]
-; CHECK-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_05]]
+; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_05]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INC]], [[N]]
; CHECK-NEXT: [[SUB7:%.*]] = sub nsw i64 [[I1_02]], 3
; CHECK-NEXT: [[MUL9:%.*]] = mul nsw i64 [[SUB7]], [[ADD8]]
; CHECK-NEXT: [[REM10:%.*]] = srem i64 [[MUL9]], [[I1_02]]
; CHECK-NEXT: [[CONV11:%.*]] = trunc i64 [[REM10]] to i32
-; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 [[I1_02]]
-; CHECK-NEXT: store i32 [[CONV11]], i32* [[ARRAYIDX12]], align 4
+; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[I1_02]]
+; CHECK-NEXT: store i32 [[CONV11]], ptr [[ARRAYIDX12]], align 4
; CHECK-NEXT: [[INC14]] = add nsw i64 [[I1_02]], 1
; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i64 [[INC14]], [[N]]
; CHECK-NEXT: br i1 [[CMP3]], label [[BB5]], label [[BB15:%.*]]
%mul = mul nsw i64 %sub, %add
%rem = srem i64 %mul, %i.05
%conv = trunc i64 %rem to i32
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.05
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.05
+ store i32 %conv, ptr %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %bb5, label %bb10
%mul9 = mul nsw i64 %sub7, %add8
%rem10 = srem i64 %mul9, %i1.02
%conv11 = trunc i64 %rem10 to i32
- %arrayidx12 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %i1.02
- store i32 %conv11, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %i1.02
+ store i32 %conv11, ptr %arrayidx12, align 4
%inc14 = add nsw i64 %i1.02, 1
%cmp3 = icmp slt i64 %inc14, %N
br i1 %cmp3, label %bb9, label %bb15
; Test that `%add` is moved in for.first.preheader, and the two loops for.first
; and for.second are fused.
-define void @moveinsts_preheader(i32* noalias %A, i32* noalias %B, i64 %N, i32 %x) {
+define void @moveinsts_preheader(ptr noalias %A, ptr noalias %B, i64 %N, i32 %x) {
; CHECK-LABEL: @moveinsts_preheader(
; CHECK-NEXT: for.first.guard:
; CHECK-NEXT: [[CMP_GUARD:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INC_I:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[INC_J:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
-; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I]]
-; CHECK-NEXT: store i32 0, i32* [[AI]], align 4
+; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I]]
+; CHECK-NEXT: store i32 0, ptr [[AI]], align 4
; CHECK-NEXT: [[INC_I]] = add nsw i64 [[I]], 1
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i64 [[INC_I]], [[N]]
-; CHECK-NEXT: [[BJ:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[J]]
-; CHECK-NEXT: store i32 0, i32* [[BJ]], align 4
+; CHECK-NEXT: [[BJ:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[J]]
+; CHECK-NEXT: store i32 0, ptr [[BJ]], align 4
; CHECK-NEXT: [[INC_J]] = add nsw i64 [[J]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC_J]], [[N]]
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_FIRST]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i = phi i64 [ %inc.i, %for.first ], [ 0, %for.first.preheader ]
- %Ai = getelementptr inbounds i32, i32* %A, i64 %i
- store i32 0, i32* %Ai, align 4
+ %Ai = getelementptr inbounds i32, ptr %A, i64 %i
+ store i32 0, ptr %Ai, align 4
%inc.i = add nsw i64 %i, 1
%cmp.i = icmp slt i64 %inc.i, %N
br i1 %cmp.i, label %for.first, label %for.first.exit
for.second:
%j = phi i64 [ %inc.j, %for.second ], [ 0, %for.second.preheader ]
- %Bj = getelementptr inbounds i32, i32* %B, i64 %j
- store i32 0, i32* %Bj, align 4
+ %Bj = getelementptr inbounds i32, ptr %B, i64 %j
+ store i32 0, ptr %Bj, align 4
%inc.j = add nsw i64 %j, 1
%cmp.j = icmp slt i64 %inc.j, %N
br i1 %cmp.j, label %for.second, label %for.second.exit
; Test that `%add` is moved in for.second.exit, and the two loops for.first
; and for.second are fused.
-define void @moveinsts_exitblock(i32* noalias %A, i32* noalias %B, i64 %N, i32 %x) {
+define void @moveinsts_exitblock(ptr noalias %A, ptr noalias %B, i64 %N, i32 %x) {
; CHECK-LABEL: @moveinsts_exitblock(
; CHECK-NEXT: for.first.guard:
; CHECK-NEXT: [[CMP_GUARD:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I_04:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
; CHECK-NEXT: [[J_02:%.*]] = phi i64 [ [[INC6:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_04]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_04]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_04]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INC]], [[N]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[J_02]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[J_02]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INC6]] = add nsw i64 [[J_02]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC6]], [[N]]
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_FIRST]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.first, label %for.first.exit
for.second:
%j.02 = phi i64 [ %inc6, %for.second ], [ 0, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp.j = icmp slt i64 %inc6, %N
br i1 %cmp.j, label %for.second, label %for.second.exit
; Test that `%add` is moved in for.first.guard, and the two loops for.first
; and for.second are fused.
-define void @moveinsts_guardblock(i32* noalias %A, i32* noalias %B, i64 %N, i32 %x) {
+define void @moveinsts_guardblock(ptr noalias %A, ptr noalias %B, i64 %N, i32 %x) {
; CHECK-LABEL: @moveinsts_guardblock(
; CHECK-NEXT: for.first.guard:
; CHECK-NEXT: [[CMP_GUARD:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I_04:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
; CHECK-NEXT: [[J_02:%.*]] = phi i64 [ [[INC6:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_04]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_04]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_04]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INC]], [[N]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[J_02]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[J_02]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INC6]] = add nsw i64 [[J_02]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC6]], [[N]]
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_FIRST]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.first, label %for.first.exit
for.second:
%j.02 = phi i64 [ %inc6, %for.second ], [ 0, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp.j = icmp slt i64 %inc6, %N
br i1 %cmp.j, label %for.second, label %for.second.exit
; from for.second.guard to for.first.guard, and the two loops for.first and
; for.second are fused.
-define i64 @updatephi_guardnonloopblock(i32* noalias %A, i32* noalias %B, i64 %N, i32 %x) {
+define i64 @updatephi_guardnonloopblock(ptr noalias %A, ptr noalias %B, i64 %N, i32 %x) {
; CHECK-LABEL: @updatephi_guardnonloopblock(
; CHECK-NEXT: for.first.guard:
; CHECK-NEXT: [[CMP_GUARD:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I_04:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
; CHECK-NEXT: [[J_02:%.*]] = phi i64 [ [[INC6:%.*]], [[FOR_FIRST]] ], [ 0, [[FOR_FIRST_PREHEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_04]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_04]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_04]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INC]], [[N]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[J_02]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[J_02]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INC6]] = add nsw i64 [[J_02]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC6]], [[N]]
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_FIRST]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, %N
br i1 %cmp, label %for.first, label %for.first.exit
for.second:
%j.02 = phi i64 [ %inc6, %for.second ], [ 0, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp.j = icmp slt i64 %inc6, %N
br i1 %cmp.j, label %for.second, label %for.second.exit
@B = common global [1024 x i32] zeroinitializer, align 16
-; CHECK-LABEL: void @main(i32* noalias %A)
+; CHECK-LABEL: void @main(ptr noalias %A)
; CHECK-NEXT: entry:
; CHECK: br i1 %cmp4, label %for.first.entry, label %for.end
; CHECK: for.first.entry
; CHECK: for.end:
; CHECK-NEXT: ret void
-define void @main(i32* noalias %A) {
+define void @main(ptr noalias %A) {
entry:
%cmp4 = icmp slt i64 0, 45
br i1 %cmp4, label %for.first.entry, label %for.second.guard
%mul = mul nsw i64 %sub, %add
%rem = srem i64 %mul, %i.05
%conv = trunc i64 %rem to i32
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.05
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.05
+ store i32 %conv, ptr %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%cmp = icmp slt i64 %inc, 45
br i1 %cmp, label %for.first, label %for.first.exit
%mul9 = mul nsw i64 %sub7, %add8
%rem10 = srem i64 %mul9, %i1.02
%conv11 = trunc i64 %rem10 to i32
- %arrayidx12 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %i1.02
- store i32 %conv11, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %i1.02
+ store i32 %conv11, ptr %arrayidx12, align 4
%inc14 = add nsw i64 %i1.02, 1
%cmp3 = icmp slt i64 %inc14, 45
br i1 %cmp3, label %for.second, label %for.second.exit
; loops unsafe to fuse together.
; The expected output of this test is the function as below.
-; CHECK-LABEL: void @unsafe_exitblock(i32* noalias %A, i32* noalias %B)
+; CHECK-LABEL: void @unsafe_exitblock(ptr noalias %A, ptr noalias %B)
; CHECK: for.first.guard
; CHECK: br i1 %cmp3, label %for.first.preheader, label %for.second.guard
; CHECK: for.first.preheader:
; CHECK: for.end:
; CHECK-NEXT: ret void
-define void @unsafe_exitblock(i32* noalias %A, i32* noalias %B) {
+define void @unsafe_exitblock(ptr noalias %A, ptr noalias %B) {
for.first.guard:
%cmp3 = icmp slt i64 0, 45
br i1 %cmp3, label %for.first.preheader, label %for.second.guard
for.first: ; preds = %for.first.preheader, %for.first
%i.04 = phi i64 [ %inc, %for.first ], [ 0, %for.first.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.04
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %i.04
+ store i32 0, ptr %arrayidx, align 4
%inc = add nsw i64 %i.04, 1
%cmp = icmp slt i64 %inc, 45
br i1 %cmp, label %for.first, label %for.first.exit
for.second: ; preds = %for.second.preheader, %for.second
%j.02 = phi i64 [ %inc6, %for.second ], [ 2, %for.second.preheader ]
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %j.02
- store i32 0, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %j.02
+ store i32 0, ptr %arrayidx4, align 4
%inc6 = add nsw i64 %j.02, 1
%cmp2 = icmp slt i64 %inc6, 45
br i1 %cmp2, label %for.second, label %for.second.exit
; CHECK-LABEL: @hoist_preheader(
; CHECK-NEXT: pre1:
; CHECK-NEXT: [[PTR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[PTR]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: br label [[BODY1:%.*]]
; CHECK: body1:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[BODY1]] ], [ 0, [[PRE1:%.*]] ]
br i1 %cond, label %body1, label %pre2
pre2:
- %b = load i32, i32 * %ptr
+ %b = load i32, ptr %ptr
br label %body2
body2: ; preds = %pre2, %body2
; CHECK-LABEL: @hoist_preheader(
; CHECK-NEXT: pre1:
; CHECK-NEXT: [[PTR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 3, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 3, ptr [[PTR]], align 4
; CHECK-NEXT: br label [[BODY1:%.*]]
; CHECK: body1:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[BODY1]] ], [ 0, [[PRE1:%.*]] ]
br i1 %cond, label %body1, label %pre2
pre2:
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br label %body2
body2: ; preds = %pre2, %body2
%tmp17 = mul nsw i32 %tmp, %tmp16
%tmp18 = trunc i64 %indvars.iv6 to i32
%tmp19 = srem i32 %tmp17, %tmp18
- %tmp20 = getelementptr inbounds [1024 x [1024 x i32]], [1024 x [1024 x i32]]* @A, i64 0, i64 %indvars.iv6, i64 %indvars.iv
- store i32 %tmp19, i32* %tmp20, align 4
+ %tmp20 = getelementptr inbounds [1024 x [1024 x i32]], ptr @A, i64 0, i64 %indvars.iv6, i64 %indvars.iv
+ store i32 %tmp19, ptr %tmp20, align 4
br label %bb21
bb21: ; preds = %bb14
%tmp29 = mul nsw i32 %tmp26, %tmp28
%tmp30 = trunc i64 %indvars.iv6 to i32
%tmp31 = srem i32 %tmp29, %tmp30
- %tmp32 = getelementptr inbounds [1024 x [1024 x i32]], [1024 x [1024 x i32]]* @B, i64 0, i64 %indvars.iv6, i64 %indvars.iv3
- store i32 %tmp31, i32* %tmp32, align 4
+ %tmp32 = getelementptr inbounds [1024 x [1024 x i32]], ptr @B, i64 0, i64 %indvars.iv6, i64 %indvars.iv3
+ store i32 %tmp31, ptr %tmp32, align 4
br label %bb33
bb33: ; preds = %bb25
%tmp21 = mul nsw i32 %tmp, %tmp20
%tmp22 = trunc i64 %indvars.iv105 to i32
%tmp23 = srem i32 %tmp21, %tmp22
- %tmp24 = getelementptr inbounds [1024 x [1024 x i32]], [1024 x [1024 x i32]]* @A, i64 0, i64 %indvars.iv105, i64 %indvars.iv74
- store i32 %tmp23, i32* %tmp24, align 4
+ %tmp24 = getelementptr inbounds [1024 x [1024 x i32]], ptr @A, i64 0, i64 %indvars.iv105, i64 %indvars.iv74
+ store i32 %tmp23, ptr %tmp24, align 4
br label %bb25
bb25: ; preds = %bb18
%tmp39 = mul nsw i32 %tmp36, %tmp38
%tmp40 = trunc i64 %indvars.iv42 to i32
%tmp41 = srem i32 %tmp39, %tmp40
- %tmp42 = getelementptr inbounds [1024 x [1024 x i32]], [1024 x [1024 x i32]]* @B, i64 0, i64 %indvars.iv42, i64 %indvars.iv1
- store i32 %tmp41, i32* %tmp42, align 4
+ %tmp42 = getelementptr inbounds [1024 x [1024 x i32]], ptr @B, i64 0, i64 %indvars.iv42, i64 %indvars.iv1
+ store i32 %tmp41, ptr %tmp42, align 4
br label %bb43
bb43: ; preds = %bb35
br label %body1
; CHECK:body1:
-; CHECK-NOT: store atomic i32 3, i32* %ptr seq_cst, align 4
+; CHECK-NOT: store atomic i32 3, ptr %ptr seq_cst, align 4
body1: ; preds = %pre1, %body1
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
br i1 %cond, label %body1, label %pre2
; CHECK:pre2:
-; CHECK-NEXT: store atomic i32 3, i32* %ptr seq_cst, align 4
+; CHECK-NEXT: store atomic i32 3, ptr %ptr seq_cst, align 4
pre2:
- store atomic i32 3, i32* %ptr seq_cst, align 4
+ store atomic i32 3, ptr %ptr seq_cst, align 4
br label %body2
; CHECK: body2:
-; CHECK-NOT: store atomic i32 3, i32* %ptr seq_cst, align 4
+; CHECK-NOT: store atomic i32 3, ptr %ptr seq_cst, align 4
body2: ; preds = %pre2, %body2
%i2 = phi i32 [%i_next2, %body2], [0, %pre2]
%i_next2 = add i32 1, %i2
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
%cond = icmp ne i32 %i, %N
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br i1 %cond, label %body1, label %pre2
; CHECK:pre2:
-; CHECK-NEXT: %stay = load i32, i32* %ptr
+; CHECK-NEXT: %stay = load i32, ptr %ptr
pre2:
- %stay = load i32, i32* %ptr
+ %stay = load i32, ptr %ptr
br label %body2
; CHECK: body2:
%i2 = phi i32 [%i_next2, %body2], [0, %pre2]
%i_next2 = add i32 1, %i2
%cond2 = icmp ne i32 %i2, %N
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br i1 %cond2, label %body2, label %exit
; CHECK: exit:
br label %body1
; CHECK:body1:
-; CHECK-NOT: store i32 3, i32* %ptr
+; CHECK-NOT: store i32 3, ptr %ptr
body1: ; preds = %pre1, %body1
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
%cond = icmp ne i32 %i, %N
- %load1 = load i32, i32* %ptr
+ %load1 = load i32, ptr %ptr
br i1 %cond, label %body1, label %pre2
; CHECK:pre2:
-; CHECK-NEXT: store i32 3, i32* %ptr
+; CHECK-NEXT: store i32 3, ptr %ptr
pre2:
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br label %body2
; CHECK: body2:
-; CHECK-NOT: store i32 3, i32* %ptr
+; CHECK-NOT: store i32 3, ptr %ptr
body2: ; preds = %pre2, %body2
%i2 = phi i32 [%i_next2, %body2], [0, %pre2]
%i_next2 = add i32 1, %i2
%cond2 = icmp ne i32 %i2, %N
- %load2 = load i32, i32* %ptr
+ %load2 = load i32, ptr %ptr
br i1 %cond2, label %body2, label %exit
; CHECK: exit:
-; CHECK-NOT: store i32 3, i32* %ptr
+; CHECK-NOT: store i32 3, ptr %ptr
exit:
ret void
}
br label %body1
; CHECK:body1:
-; CHECK-NOT: store volatile i32 3, i32* %ptr
+; CHECK-NOT: store volatile i32 3, ptr %ptr
body1: ; preds = %pre1, %body1
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
br i1 %cond, label %body1, label %pre2
; CHECK:pre2:
-; CHECK-NEXT: store volatile i32 3, i32* %ptr
+; CHECK-NEXT: store volatile i32 3, ptr %ptr
pre2:
- store volatile i32 3, i32* %ptr
+ store volatile i32 3, ptr %ptr
br label %body2
; CHECK: body2:
-; CHECK-NOT: store volatile i32 3, i32* %ptr
+; CHECK-NOT: store volatile i32 3, ptr %ptr
body2: ; preds = %pre2, %body2
%i2 = phi i32 [%i_next2, %body2], [0, %pre2]
%i_next2 = add i32 1, %i2
; peeling).
; The expected output of this test is the function below.
-; CHECK-LABEL: void @function(i32* noalias %arg)
+; CHECK-LABEL: void @function(ptr noalias %arg)
; CHECK-NEXT: for.first.preheader:
; CHECK-NEXT: br label %for.first
; CHECK: for.first:
@B = common global [1024 x i32] zeroinitializer, align 16
-define void @function(i32* noalias %arg) {
+define void @function(ptr noalias %arg) {
for.first.preheader:
br label %for.first
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv23 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv23
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv23
+ store i32 %tmp12, ptr %tmp13, align 4
br label %for.first.latch
for.first.latch: ; preds = %for.first
%tmp23 = mul nsw i32 %tmp20, %tmp22
%tmp24 = trunc i64 %indvars.iv1 to i32
%tmp25 = srem i32 %tmp23, %tmp24
- %tmp26 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp25, i32* %tmp26, align 4
+ %tmp26 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp25, ptr %tmp26, align 4
br label %for.second.latch
for.second.latch: ; preds = %for.second
; B[i] = ((i-6)*(i+3)) % i;
; }
-; CHECK-LABEL: void @function(i32* noalias %arg)
+; CHECK-LABEL: void @function(ptr noalias %arg)
; CHECK-NEXT: for.first.preheader:
; CHECK-NEXT: br label %for.first.peel.begin
; CHECK: for.first.peel.begin:
@B = common global [1024 x i32] zeroinitializer, align 16
-define void @function(i32* noalias %arg) {
+define void @function(ptr noalias %arg) {
for.first.preheader:
br label %for.first
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv23 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv23
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv23
+ store i32 %tmp12, ptr %tmp13, align 4
br label %for.first.latch
for.first.latch: ; preds = %for.first
%tmp23 = mul nsw i32 %tmp20, %tmp22
%tmp24 = trunc i64 %indvars.iv1 to i32
%tmp25 = srem i32 %tmp23, %tmp24
- %tmp26 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp25, i32* %tmp26, align 4
+ %tmp26 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp25, ptr %tmp26, align 4
br label %for.second.latch
for.second.latch: ; preds = %for.second
@B = common global [1024 x i32] zeroinitializer, align 16
-define void @dep_free(i32* noalias %arg) {
+define void @dep_free(ptr noalias %arg) {
; CHECK-LABEL: @dep_free(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB7:%.*]]
; CHECK-NEXT: [[TMP10:%.*]] = mul nsw i32 [[TMP]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = trunc i64 [[INDVARS_IV23]] to i32
; CHECK-NEXT: [[TMP12:%.*]] = srem i32 [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[INDVARS_IV23]]
-; CHECK-NEXT: store i32 [[TMP12]], i32* [[TMP13]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[INDVARS_IV23]]
+; CHECK-NEXT: store i32 [[TMP12]], ptr [[TMP13]], align 4
; CHECK-NEXT: br label [[BB14:%.*]]
; CHECK: bb14:
; CHECK-NEXT: [[TMP20:%.*]] = add nsw i32 [[DOT02]], -3
; CHECK-NEXT: [[TMP23:%.*]] = mul nsw i32 [[TMP20]], [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[INDVARS_IV1]] to i32
; CHECK-NEXT: [[TMP25:%.*]] = srem i32 [[TMP23]], [[TMP24]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 [[INDVARS_IV1]]
-; CHECK-NEXT: store i32 [[TMP25]], i32* [[TMP26]], align 4
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[INDVARS_IV1]]
+; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP26]], align 4
; CHECK-NEXT: br label [[BB27]]
; CHECK: bb27:
; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV23]], 1
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv23 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv23
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv23
+ store i32 %tmp12, ptr %tmp13, align 4
br label %bb14
bb14: ; preds = %bb7
%tmp23 = mul nsw i32 %tmp20, %tmp22
%tmp24 = trunc i64 %indvars.iv1 to i32
%tmp25 = srem i32 %tmp23, %tmp24
- %tmp26 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp25, i32* %tmp26, align 4
+ %tmp26 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp25, ptr %tmp26, align 4
br label %bb27
bb27: ; preds = %bb19
ret void
}
-define void @dep_free_parametric(i32* noalias %arg, i64 %arg2) {
+define void @dep_free_parametric(ptr noalias %arg, i64 %arg2) {
; CHECK-LABEL: @dep_free_parametric(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 0, [[ARG2:%.*]]
; CHECK-NEXT: [[TMP8:%.*]] = mul nsw i64 [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = srem i64 [[TMP8]], [[DOT014]]
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[DOT014]]
-; CHECK-NEXT: store i32 [[TMP10]], i32* [[TMP11]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[DOT014]]
+; CHECK-NEXT: store i32 [[TMP10]], ptr [[TMP11]], align 4
; CHECK-NEXT: br label [[BB12:%.*]]
; CHECK: bb12:
; CHECK-NEXT: [[TMP19:%.*]] = add nsw i64 [[DOT02]], -3
; CHECK-NEXT: [[TMP21:%.*]] = mul nsw i64 [[TMP19]], [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = srem i64 [[TMP21]], [[DOT02]]
; CHECK-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 [[DOT02]]
-; CHECK-NEXT: store i32 [[TMP23]], i32* [[TMP24]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[DOT02]]
+; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP24]], align 4
; CHECK-NEXT: br label [[BB25]]
; CHECK: bb25:
; CHECK-NEXT: [[TMP13]] = add nuw nsw i64 [[DOT014]], 1
%tmp8 = mul nsw i64 %tmp6, %tmp7
%tmp9 = srem i64 %tmp8, %.014
%tmp10 = trunc i64 %tmp9 to i32
- %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %.014
- store i32 %tmp10, i32* %tmp11, align 4
+ %tmp11 = getelementptr inbounds i32, ptr %arg, i64 %.014
+ store i32 %tmp10, ptr %tmp11, align 4
br label %bb12
bb12: ; preds = %bb5
%tmp21 = mul nsw i64 %tmp19, %tmp20
%tmp22 = srem i64 %tmp21, %.02
%tmp23 = trunc i64 %tmp22 to i32
- %tmp24 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %.02
- store i32 %tmp23, i32* %tmp24, align 4
+ %tmp24 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %.02
+ store i32 %tmp23, ptr %tmp24, align 4
br label %bb25
bb25: ; preds = %bb18
ret void
}
-define void @raw_only(i32* noalias %arg) {
+define void @raw_only(ptr noalias %arg) {
; CHECK-LABEL: @raw_only(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB7:%.*]]
; CHECK: bb7:
; CHECK-NEXT: [[INDVARS_IV22:%.*]] = phi i64 [ 0, [[BB:%.*]] ], [ [[INDVARS_IV_NEXT3:%.*]], [[BB18:%.*]] ]
; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[BB]] ], [ [[INDVARS_IV_NEXT:%.*]], [[BB18]] ]
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[INDVARS_IV22]]
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[INDVARS_IV22]]
; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[INDVARS_IV22]] to i32
-; CHECK-NEXT: store i32 [[TMP8]], i32* [[TMP]], align 4
+; CHECK-NEXT: store i32 [[TMP8]], ptr [[TMP]], align 4
; CHECK-NEXT: br label [[BB9:%.*]]
; CHECK: bb9:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[INDVARS_IV1]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[INDVARS_IV1]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = shl nsw i32 [[TMP15]], 1
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 [[INDVARS_IV1]]
-; CHECK-NEXT: store i32 [[TMP16]], i32* [[TMP17]], align 4
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[INDVARS_IV1]]
+; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP17]], align 4
; CHECK-NEXT: br label [[BB18]]
; CHECK: bb18:
; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV22]], 1
bb7: ; preds = %bb, %bb9
%indvars.iv22 = phi i64 [ 0, %bb ], [ %indvars.iv.next3, %bb9 ]
- %tmp = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv22
+ %tmp = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv22
%tmp8 = trunc i64 %indvars.iv22 to i32
- store i32 %tmp8, i32* %tmp, align 4
+ store i32 %tmp8, ptr %tmp, align 4
br label %bb9
bb9: ; preds = %bb7
bb13: ; preds = %bb11.preheader, %bb18
%indvars.iv1 = phi i64 [ 0, %bb11.preheader ], [ %indvars.iv.next, %bb18 ]
- %tmp14 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv1
- %tmp15 = load i32, i32* %tmp14, align 4
+ %tmp14 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv1
+ %tmp15 = load i32, ptr %tmp14, align 4
%tmp16 = shl nsw i32 %tmp15, 1
- %tmp17 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv1
- store i32 %tmp16, i32* %tmp17, align 4
+ %tmp17 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv1
+ store i32 %tmp16, ptr %tmp17, align 4
br label %bb18
bb18: ; preds = %bb13
ret void
}
-define void @raw_only_parametric(i32* noalias %arg, i32 %arg4) {
+define void @raw_only_parametric(ptr noalias %arg, i32 %arg4) {
; CHECK-LABEL: @raw_only_parametric(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[ARG4:%.*]] to i64
; CHECK: bb8:
; CHECK-NEXT: [[INDVARS_IV25:%.*]] = phi i64 [ [[INDVARS_IV_NEXT3:%.*]], [[BB8]] ], [ 0, [[BB8_PREHEADER]] ]
; CHECK-NEXT: [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB8]] ], [ 0, [[BB8_PREHEADER]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[INDVARS_IV25]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[INDVARS_IV25]]
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[INDVARS_IV25]] to i32
-; CHECK-NEXT: store i32 [[TMP10]], i32* [[TMP9]], align 4
+; CHECK-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV25]], 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT3]], [[TMP]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[INDVARS_IV3]]
-; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[INDVARS_IV3]]
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
; CHECK-NEXT: [[TMP20:%.*]] = shl nsw i32 [[TMP19]], 1
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 [[INDVARS_IV3]]
-; CHECK-NEXT: store i32 [[TMP20]], i32* [[TMP21]], align 4
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 [[INDVARS_IV3]]
+; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP21]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV3]], 1
; CHECK-NEXT: [[TMP15:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], [[TMP]]
; CHECK-NEXT: br i1 [[TMP15]], label [[BB8]], label [[BB23_LOOPEXIT:%.*]]
bb8: ; preds = %bb, %bb8
%indvars.iv25 = phi i64 [ %indvars.iv.next3, %bb8 ], [ 0, %bb ]
- %tmp9 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv25
+ %tmp9 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv25
%tmp10 = trunc i64 %indvars.iv25 to i32
- store i32 %tmp10, i32* %tmp9, align 4
+ store i32 %tmp10, ptr %tmp9, align 4
%indvars.iv.next3 = add nuw nsw i64 %indvars.iv25, 1
%tmp6 = icmp slt i64 %indvars.iv.next3, %tmp
br i1 %tmp6, label %bb8, label %bb17
bb17: ; preds = %bb8, %bb17
%indvars.iv3 = phi i64 [ %indvars.iv.next, %bb17 ], [ 0, %bb8 ]
- %tmp18 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv3
- %tmp19 = load i32, i32* %tmp18, align 4
+ %tmp18 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv3
+ %tmp19 = load i32, ptr %tmp18, align 4
%tmp20 = shl nsw i32 %tmp19, 1
- %tmp21 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv3
- store i32 %tmp20, i32* %tmp21, align 4
+ %tmp21 = getelementptr inbounds [1024 x i32], ptr @B, i64 0, i64 %indvars.iv3
+ store i32 %tmp20, ptr %tmp21, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv3, 1
%tmp15 = icmp slt i64 %indvars.iv.next, %tmp
br i1 %tmp15, label %bb17, label %bb23
ret void
}
-define void @forward_dep(i32* noalias %arg) {
+define void @forward_dep(ptr noalias %arg) {
; CHECK-LABEL: @forward_dep(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB7:%.*]]
; CHECK-NEXT: [[TMP10:%.*]] = mul nsw i32 [[TMP]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = trunc i64 [[INDVARS_IV22]] to i32
; CHECK-NEXT: [[TMP12:%.*]] = srem i32 [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[ARG:%.*]], i64 [[INDVARS_IV22]]
-; CHECK-NEXT: store i32 [[TMP12]], i32* [[TMP13]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[INDVARS_IV22]]
+; CHECK-NEXT: store i32 [[TMP12]], ptr [[TMP13]], align 4
; CHECK-NEXT: br label [[BB14]]
; CHECK: bb14:
; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV22]], 1
; CHECK: bb19:
; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB25:%.*]] ], [ 0, [[BB19_PREHEADER]] ]
; CHECK-NEXT: [[TMP20:%.*]] = add nsw i64 [[INDVARS_IV1]], -3
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = mul nsw i32 [[TMP22]], 3
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, i32* [[ARG]], i64 [[INDVARS_IV1]]
-; CHECK-NEXT: store i32 [[TMP23]], i32* [[TMP24]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[INDVARS_IV1]]
+; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP24]], align 4
; CHECK-NEXT: br label [[BB25]]
; CHECK: bb25:
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV1]], 1
%tmp10 = mul nsw i32 %tmp, %tmp9
%tmp11 = trunc i64 %indvars.iv22 to i32
%tmp12 = srem i32 %tmp10, %tmp11
- %tmp13 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv22
- store i32 %tmp12, i32* %tmp13, align 4
+ %tmp13 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv22
+ store i32 %tmp12, ptr %tmp13, align 4
br label %bb14
bb14: ; preds = %bb7
bb19: ; preds = %bb14, %bb25
%indvars.iv1 = phi i64 [ 0, %bb14 ], [ %indvars.iv.next, %bb25 ]
%tmp20 = add nsw i64 %indvars.iv1, -3
- %tmp21 = getelementptr inbounds i32, i32* %arg, i64 %tmp20
- %tmp22 = load i32, i32* %tmp21, align 4
+ %tmp21 = getelementptr inbounds i32, ptr %arg, i64 %tmp20
+ %tmp22 = load i32, ptr %tmp21, align 4
%tmp23 = mul nsw i32 %tmp22, 3
- %tmp24 = getelementptr inbounds i32, i32* %arg, i64 %indvars.iv1
- store i32 %tmp23, i32* %tmp24, align 4
+ %tmp24 = getelementptr inbounds i32, ptr %arg, i64 %indvars.iv1
+ store i32 %tmp23, ptr %tmp24, align 4
br label %bb25
bb25: ; preds = %bb19
; latch iff it is proven safe. %inc.first and %cmp.first are moved, but
; `store i32 0, ptr %Ai.first` is not.
-define void @flow_dep(i32* noalias %A, i32* noalias %B) {
+define void @flow_dep(ptr noalias %A, ptr noalias %B) {
; CHECK-LABEL: @flow_dep(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_FIRST:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I_FIRST:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC_FIRST:%.*]], [[FOR_SECOND_LATCH:%.*]] ]
; CHECK-NEXT: [[I_SECOND:%.*]] = phi i64 [ [[INC_SECOND:%.*]], [[FOR_SECOND_LATCH]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[AI_FIRST:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_FIRST]]
-; CHECK-NEXT: store i32 0, i32* [[AI_FIRST]], align 4
-; CHECK-NEXT: [[AI_SECOND:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[I_SECOND]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[AI_SECOND]], align 4
-; CHECK-NEXT: [[BI:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[I_SECOND]]
-; CHECK-NEXT: store i32 [[TMP0]], i32* [[BI]], align 4
+; CHECK-NEXT: [[AI_FIRST:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_FIRST]]
+; CHECK-NEXT: store i32 0, ptr [[AI_FIRST]], align 4
+; CHECK-NEXT: [[AI_SECOND:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_SECOND]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[AI_SECOND]], align 4
+; CHECK-NEXT: [[BI:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_SECOND]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr [[BI]], align 4
; CHECK-NEXT: br label [[FOR_SECOND_LATCH]]
; CHECK: for.second.latch:
; CHECK-NEXT: [[INC_FIRST]] = add nsw i64 [[I_FIRST]], 1
for.first:
%i.first = phi i64 [ 0, %entry ], [ %inc.first, %for.first ]
- %Ai.first = getelementptr inbounds i32, i32* %A, i64 %i.first
- store i32 0, i32* %Ai.first, align 4
+ %Ai.first = getelementptr inbounds i32, ptr %A, i64 %i.first
+ store i32 0, ptr %Ai.first, align 4
%inc.first = add nsw i64 %i.first, 1
%cmp.first = icmp slt i64 %inc.first, 100
br i1 %cmp.first, label %for.first, label %for.second.preheader
for.second:
%i.second = phi i64 [ %inc.second, %for.second.latch ], [ 0, %for.second.preheader ]
- %Ai.second = getelementptr inbounds i32, i32* %A, i64 %i.second
- %0 = load i32, i32* %Ai.second, align 4
- %Bi = getelementptr inbounds i32, i32* %B, i64 %i.second
- store i32 %0, i32* %Bi, align 4
+ %Ai.second = getelementptr inbounds i32, ptr %A, i64 %i.second
+ %0 = load i32, ptr %Ai.second, align 4
+ %Bi = getelementptr inbounds i32, ptr %B, i64 %i.second
+ store i32 %0, ptr %Bi, align 4
br label %for.second.latch
for.second.latch:
; Test that `%add` is moved in basic block entry, and the two loops for.first
; and for.second are fused.
-define i32 @moveinsts_preheader(i32* %A, i32 %x) {
+define i32 @moveinsts_preheader(ptr %A, i32 %x) {
; CHECK-LABEL: @moveinsts_preheader(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X:%.*]], 1
; CHECK: for.first:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC_I:%.*]], [[FOR_FIRST]] ]
; CHECK-NEXT: [[J:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INC_J:%.*]], [[FOR_FIRST]] ]
-; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I]]
-; CHECK-NEXT: store i32 0, i32* [[AI]], align 4
+; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I]]
+; CHECK-NEXT: store i32 0, ptr [[AI]], align 4
; CHECK-NEXT: [[INC_I]] = add nsw i64 [[I]], 1
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i64 [[INC_I]], 100
-; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[J]]
-; CHECK-NEXT: store i32 2, i32* [[AJ]], align 4
+; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[J]]
+; CHECK-NEXT: store i32 2, ptr [[AJ]], align 4
; CHECK-NEXT: [[INC_J]] = add nsw i64 [[J]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC_J]], 100
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_FIRST]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i = phi i64 [ 0, %entry ], [ %inc.i, %for.first ]
- %Ai = getelementptr inbounds i32, i32* %A, i64 %i
- store i32 0, i32* %Ai, align 4
+ %Ai = getelementptr inbounds i32, ptr %A, i64 %i
+ store i32 0, ptr %Ai, align 4
%inc.i = add nsw i64 %i, 1
%cmp.i = icmp slt i64 %inc.i, 100
br i1 %cmp.i, label %for.first, label %for.first.exit
for.second:
%j = phi i64 [ 0, %for.first.exit ], [ %inc.j, %for.second ]
- %Aj = getelementptr inbounds i32, i32* %A, i64 %j
- store i32 2, i32* %Aj, align 4
+ %Aj = getelementptr inbounds i32, ptr %A, i64 %j
+ store i32 2, ptr %Aj, align 4
%inc.j = add nsw i64 %j, 1
%cmp.j = icmp slt i64 %inc.j, 100
br i1 %cmp.j, label %for.second, label %for.second.exit
; since it is used in for.second. Check also that the two loops for.first and
; for.second are not fused.
-define i64 @unsafe_preheader(i32* %A, i64 %x) {
+define i64 @unsafe_preheader(ptr %A, i64 %x) {
; CHECK-LABEL: @unsafe_preheader(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_FIRST:%.*]]
; CHECK: for.first:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC_I:%.*]], [[FOR_FIRST]] ]
-; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I]]
-; CHECK-NEXT: store i32 0, i32* [[AI]], align 4
+; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I]]
+; CHECK-NEXT: store i32 0, ptr [[AI]], align 4
; CHECK-NEXT: [[INC_I]] = add nsw i64 [[I]], 1
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i64 [[INC_I]], 100
; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_FIRST]], label [[FOR_FIRST_EXIT:%.*]]
; CHECK-NEXT: br label [[FOR_SECOND:%.*]]
; CHECK: for.second:
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[ADD]], [[FOR_FIRST_EXIT]] ], [ [[INC_J:%.*]], [[FOR_SECOND]] ]
-; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[J]]
-; CHECK-NEXT: store i32 2, i32* [[AJ]], align 4
+; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[J]]
+; CHECK-NEXT: store i32 2, ptr [[AJ]], align 4
; CHECK-NEXT: [[INC_J]] = add nsw i64 [[J]], 1
; CHECK-NEXT: [[CMP_J:%.*]] = icmp slt i64 [[INC_J]], 100
; CHECK-NEXT: br i1 [[CMP_J]], label [[FOR_SECOND]], label [[FOR_SECOND_EXIT:%.*]]
for.first:
%i = phi i64 [ 0, %entry ], [ %inc.i, %for.first ]
- %Ai = getelementptr inbounds i32, i32* %A, i64 %i
- store i32 0, i32* %Ai, align 4
+ %Ai = getelementptr inbounds i32, ptr %A, i64 %i
+ store i32 0, ptr %Ai, align 4
%inc.i = add nsw i64 %i, 1
%cmp.i = icmp slt i64 %inc.i, 100
br i1 %cmp.i, label %for.first, label %for.first.exit
for.second:
%j = phi i64 [ %add, %for.first.exit ], [ %inc.j, %for.second ]
- %Aj = getelementptr inbounds i32, i32* %A, i64 %j
- store i32 2, i32* %Aj, align 4
+ %Aj = getelementptr inbounds i32, ptr %A, i64 %j
+ store i32 2, ptr %Aj, align 4
%inc.j = add nsw i64 %j, 1
%cmp.j = icmp slt i64 %inc.j, 100
br i1 %cmp.j, label %for.second, label %for.second.exit
; CHECK-NEXT: [[I2:%.*]] = phi i32 [ [[I_NEXT2:%.*]], [[BODY1]] ], [ 0, [[PRE1]] ]
; CHECK-NEXT: [[I_NEXT]] = add i32 1, [[I]]
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], [[N:%.*]]
-; CHECK-NEXT: store i32 3, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 3, ptr [[PTR]], align 4
; CHECK-NEXT: [[I_NEXT2]] = add i32 1, [[I2]]
; CHECK-NEXT: [[COND2:%.*]] = icmp ne i32 [[I2]], [[N]]
; CHECK-NEXT: br i1 [[COND2]], label [[BODY1]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[PTR]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
pre1:
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
%cond = icmp ne i32 %i, %N
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br i1 %cond, label %body1, label %pre2
pre2:
- %b = load i32, i32 * %ptr
+ %b = load i32, ptr %ptr
br label %body2
body2: ; preds = %pre2, %body2
; CHECK-NEXT: [[I2:%.*]] = phi i32 [ [[I_NEXT2:%.*]], [[BODY1]] ], [ 0, [[PRE1]] ]
; CHECK-NEXT: [[I_NEXT]] = add i32 1, [[I]]
; CHECK-NEXT: [[COND:%.*]] = icmp ne i32 [[I]], [[N:%.*]]
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[PTR]], align 4
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[I_NEXT2]] = add i32 1, [[I2]]
; CHECK-NEXT: [[COND2:%.*]] = icmp ne i32 [[I2]], [[N]]
; CHECK-NEXT: br i1 [[COND2]], label [[BODY1]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: store i32 3, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 3, ptr [[PTR]], align 4
; CHECK-NEXT: ret void
;
pre1:
%i = phi i32 [%i_next, %body1], [0, %pre1]
%i_next = add i32 1, %i
%cond = icmp ne i32 %i, %N
- %b = load i32, i32 * %ptr
+ %b = load i32, ptr %ptr
br i1 %cond, label %body1, label %pre2
pre2:
- store i32 3, i32* %ptr
+ store i32 3, ptr %ptr
br label %body2
body2: ; preds = %pre2, %body2
inner1.body:
%iv112 = phi i64 [ %iv.next113, %inner1.body ], [ 0, %inner1.ph ]
- %idx12 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @a, i64 0, i64 %iv120, i64 %iv116, i64 %iv112
- %0 = load i32, i32* %idx12
+ %idx12 = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr @a, i64 0, i64 %iv120, i64 %iv116, i64 %iv112
+ %0 = load i32, ptr %idx12
%add = add nsw i32 %0, 2
- %idx18 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @b, i64 0, i64 %iv120, i64 %iv116, i64 %iv112
- store i32 %add, i32* %idx18
+ %idx18 = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr @b, i64 0, i64 %iv120, i64 %iv116, i64 %iv112
+ store i32 %add, ptr %idx18
%iv.next113 = add nuw nsw i64 %iv112, 1
%exitcond115 = icmp eq i64 %iv.next113, %wide.trip.count114
br i1 %exitcond115, label %inner1.exit, label %inner1.body
inner2.body:
%iv = phi i64 [ %iv.next, %inner2.body ], [ 0, %inner2.ph ]
- %idx45 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @a, i64 0, i64 %iv108, i64 %iv104, i64 %iv
- %1 = load i32, i32* %idx45
+ %idx45 = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr @a, i64 0, i64 %iv108, i64 %iv104, i64 %iv
+ %1 = load i32, ptr %idx45
%mul = shl nsw i32 %1, 1
- %idx51 = getelementptr inbounds [10 x [10 x [10 x i32]]], [10 x [10 x [10 x i32]]]* @c, i64 0, i64 %iv108, i64 %iv104, i64 %iv
- store i32 %mul, i32* %idx51
+ %idx51 = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr @c, i64 0, i64 %iv108, i64 %iv104, i64 %iv
+ store i32 %mul, ptr %idx51
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %wide.trip.count114
br i1 %exitcond, label %inner2.exit, label %inner2.body
; NOLZCNT-NOT: @llvm.ctlz
; Function Attrs: norecurse nounwind uwtable
-define i32 @ctlz_and_other(i32 %n, i8* nocapture %a) {
+define i32 @ctlz_and_other(i32 %n, ptr nocapture %a) {
entry:
%c = icmp sgt i32 %n, 0
%negn = sub nsw i32 0, %n
%and = and i32 %shl, %abs_n
%tobool1 = icmp ne i32 %and, 0
%conv = zext i1 %tobool1 to i8
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %indvars.iv
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 %indvars.iv
+ store i8 %conv, ptr %arrayidx, align 1
%indvars.iv.next = add nuw i64 %indvars.iv, 1
%shr = ashr i32 %shr11, 1
%tobool = icmp eq i32 %shr, 0
}
; Even both of them can liveout
-define void @p3_constant_mask_24thbit(i32 %x, i32* %p0, i32* %p1) {
+define void @p3_constant_mask_24thbit(i32 %x, ptr %p0, ptr %p1) {
; ALL-LABEL: @p3_constant_mask_24thbit(
; ALL-NEXT: entry:
; ALL-NEXT: [[X_MASKED:%.*]] = and i32 [[X:%.*]], 33554431, !dbg [[DBG61:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG61]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG65]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG67:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG68:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG67:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG68:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG69:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p4_constant_mask_15thbit(i32 %x, i32* %p0, i32* %p1) {
+define void @p4_constant_mask_15thbit(i32 %x, ptr %p0, ptr %p1) {
; ALL-LABEL: @p4_constant_mask_15thbit(
; ALL-NEXT: entry:
; ALL-NEXT: [[X_MASKED:%.*]] = and i32 [[X:%.*]], 65535, !dbg [[DBG76:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG76]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG80]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG82:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG83:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG82:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG83:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG84:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
; All no-wrap flags can be kept on the shift.
-define void @p5_nuw(i32 %x, i32 %bit, i32* %p0, i32* %p1) {
+define void @p5_nuw(i32 %x, i32 %bit, ptr %p0, ptr %p1) {
; ALL-LABEL: @p5_nuw(
; ALL-NEXT: entry:
; ALL-NEXT: [[BITMASK:%.*]] = shl i32 1, [[BIT:%.*]], !dbg [[DBG92:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG93]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG97]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG99:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG100:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG99:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG100:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG101:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p6_nsw(i32 %x, i32 %bit, i32* %p0, i32* %p1) {
+define void @p6_nsw(i32 %x, i32 %bit, ptr %p0, ptr %p1) {
; ALL-LABEL: @p6_nsw(
; ALL-NEXT: entry:
; ALL-NEXT: [[BITMASK:%.*]] = shl i32 1, [[BIT:%.*]], !dbg [[DBG109:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG110]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG114]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG116:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG117:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG116:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG117:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG118:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p7_nuwnsw(i32 %x, i32 %bit, i32* %p0, i32* %p1) {
+define void @p7_nuwnsw(i32 %x, i32 %bit, ptr %p0, ptr %p1) {
; ALL-LABEL: @p7_nuwnsw(
; ALL-NEXT: entry:
; ALL-NEXT: [[BITMASK:%.*]] = shl i32 1, [[BIT:%.*]], !dbg [[DBG126:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG127]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG131]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG133:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG134:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG133:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG134:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p8_constant_mask_signbit_noncanonical(i32 %x, i32* %p0, i32* %p1) {
+define void @p8_constant_mask_signbit_noncanonical(i32 %x, ptr %p0, ptr %p1) {
; ALL-LABEL: @p8_constant_mask_signbit_noncanonical(
; ALL-NEXT: entry:
; ALL-NEXT: [[X_MASKED:%.*]] = and i32 [[X:%.*]], -1, !dbg [[DBG142:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG142]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG146]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG148:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG149:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG148:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG149:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG150:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p9_constant_mask_signbit_canonical(i32 %x, i32* %p0, i32* %p1) {
+define void @p9_constant_mask_signbit_canonical(i32 %x, ptr %p0, ptr %p1) {
; ALL-LABEL: @p9_constant_mask_signbit_canonical(
; ALL-NEXT: entry:
; ALL-NEXT: [[X_MASKED:%.*]] = and i32 [[X:%.*]], -1, !dbg [[DBG156:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG156]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG159]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG161:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG162:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG161:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG162:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG163:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
-define void @p10_x_is_not_one(i32 %bit, i32* %p0, i32* %p1) {
+define void @p10_x_is_not_one(i32 %bit, ptr %p0, ptr %p1) {
; ALL-LABEL: @p10_x_is_not_one(
; ALL-NEXT: entry:
; ALL-NEXT: [[BITMASK:%.*]] = shl i32 1, [[BIT:%.*]], !dbg [[DBG171:![0-9]+]]
; ALL: end:
; ALL-NEXT: [[X_CURR_LCSSA:%.*]] = phi i32 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG172]]
; ALL-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i32 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG176]]
-; ALL-NEXT: store i32 [[X_CURR_LCSSA]], i32* [[P0:%.*]], align 4, !dbg [[DBG178:![0-9]+]]
-; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], i32* [[P1:%.*]], align 4, !dbg [[DBG179:![0-9]+]]
+; ALL-NEXT: store i32 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 4, !dbg [[DBG178:![0-9]+]]
+; ALL-NEXT: store i32 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 4, !dbg [[DBG179:![0-9]+]]
; ALL-NEXT: ret void, !dbg [[DBG180:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i32 %x.curr, i32* %p0
- store i32 %x.next, i32* %p1
+ store i32 %x.curr, ptr %p0
+ store i32 %x.next, ptr %p1
ret void
}
}
; Various weird bit widths
-define void @t35_i1(i1 %x, i1 %bit, i1* %p0, i1* %p1) {
+define void @t35_i1(i1 %x, i1 %bit, ptr %p0, ptr %p1) {
; LZCNT-LABEL: @t35_i1(
; LZCNT-NEXT: entry:
; LZCNT-NEXT: [[BITMASK:%.*]] = shl i1 true, [[BIT:%.*]], !dbg [[DBG539:![0-9]+]]
; LZCNT: end:
; LZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i1 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG540]]
; LZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i1 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG544]]
-; LZCNT-NEXT: store i1 [[X_CURR_LCSSA]], i1* [[P0:%.*]], align 1, !dbg [[DBG546:![0-9]+]]
-; LZCNT-NEXT: store i1 [[X_NEXT_LCSSA]], i1* [[P1:%.*]], align 1, !dbg [[DBG547:![0-9]+]]
+; LZCNT-NEXT: store i1 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG546:![0-9]+]]
+; LZCNT-NEXT: store i1 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG547:![0-9]+]]
; LZCNT-NEXT: ret void, !dbg [[DBG548:![0-9]+]]
;
; NOLZCNT-LABEL: @t35_i1(
; NOLZCNT: end:
; NOLZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i1 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG541]]
; NOLZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i1 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG544]]
-; NOLZCNT-NEXT: store i1 [[X_CURR_LCSSA]], i1* [[P0:%.*]], align 1, !dbg [[DBG546:![0-9]+]]
-; NOLZCNT-NEXT: store i1 [[X_NEXT_LCSSA]], i1* [[P1:%.*]], align 1, !dbg [[DBG547:![0-9]+]]
+; NOLZCNT-NEXT: store i1 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG546:![0-9]+]]
+; NOLZCNT-NEXT: store i1 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG547:![0-9]+]]
; NOLZCNT-NEXT: ret void, !dbg [[DBG548:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i1 %x.curr, i1* %p0
- store i1 %x.next, i1* %p1
+ store i1 %x.curr, ptr %p0
+ store i1 %x.next, ptr %p1
ret void
}
-define void @t36_i2(i2 %x, i2 %bit, i2* %p0, i2* %p1) {
+define void @t36_i2(i2 %x, i2 %bit, ptr %p0, ptr %p1) {
; LZCNT-LABEL: @t36_i2(
; LZCNT-NEXT: entry:
; LZCNT-NEXT: [[BITMASK:%.*]] = shl i2 1, [[BIT:%.*]], !dbg [[DBG556:![0-9]+]]
; LZCNT: end:
; LZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i2 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG557]]
; LZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i2 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG561]]
-; LZCNT-NEXT: store i2 [[X_CURR_LCSSA]], i2* [[P0:%.*]], align 1, !dbg [[DBG563:![0-9]+]]
-; LZCNT-NEXT: store i2 [[X_NEXT_LCSSA]], i2* [[P1:%.*]], align 1, !dbg [[DBG564:![0-9]+]]
+; LZCNT-NEXT: store i2 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG563:![0-9]+]]
+; LZCNT-NEXT: store i2 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG564:![0-9]+]]
; LZCNT-NEXT: ret void, !dbg [[DBG565:![0-9]+]]
;
; NOLZCNT-LABEL: @t36_i2(
; NOLZCNT: end:
; NOLZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i2 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG558]]
; NOLZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i2 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG561]]
-; NOLZCNT-NEXT: store i2 [[X_CURR_LCSSA]], i2* [[P0:%.*]], align 1, !dbg [[DBG563:![0-9]+]]
-; NOLZCNT-NEXT: store i2 [[X_NEXT_LCSSA]], i2* [[P1:%.*]], align 1, !dbg [[DBG564:![0-9]+]]
+; NOLZCNT-NEXT: store i2 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG563:![0-9]+]]
+; NOLZCNT-NEXT: store i2 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG564:![0-9]+]]
; NOLZCNT-NEXT: ret void, !dbg [[DBG565:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i2 %x.curr, i2* %p0
- store i2 %x.next, i2* %p1
+ store i2 %x.curr, ptr %p0
+ store i2 %x.next, ptr %p1
ret void
}
-define void @t37_i3(i3 %x, i3 %bit, i3* %p0, i3* %p1) {
+define void @t37_i3(i3 %x, i3 %bit, ptr %p0, ptr %p1) {
; LZCNT-LABEL: @t37_i3(
; LZCNT-NEXT: entry:
; LZCNT-NEXT: [[BITMASK:%.*]] = shl i3 1, [[BIT:%.*]], !dbg [[DBG573:![0-9]+]]
; LZCNT: end:
; LZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i3 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG574]]
; LZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i3 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG578]]
-; LZCNT-NEXT: store i3 [[X_CURR_LCSSA]], i3* [[P0:%.*]], align 1, !dbg [[DBG580:![0-9]+]]
-; LZCNT-NEXT: store i3 [[X_NEXT_LCSSA]], i3* [[P1:%.*]], align 1, !dbg [[DBG581:![0-9]+]]
+; LZCNT-NEXT: store i3 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG580:![0-9]+]]
+; LZCNT-NEXT: store i3 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG581:![0-9]+]]
; LZCNT-NEXT: ret void, !dbg [[DBG582:![0-9]+]]
;
; NOLZCNT-LABEL: @t37_i3(
; NOLZCNT: end:
; NOLZCNT-NEXT: [[X_CURR_LCSSA:%.*]] = phi i3 [ [[X_CURR]], [[LOOP]] ], !dbg [[DBG575]]
; NOLZCNT-NEXT: [[X_NEXT_LCSSA:%.*]] = phi i3 [ [[X_NEXT]], [[LOOP]] ], !dbg [[DBG578]]
-; NOLZCNT-NEXT: store i3 [[X_CURR_LCSSA]], i3* [[P0:%.*]], align 1, !dbg [[DBG580:![0-9]+]]
-; NOLZCNT-NEXT: store i3 [[X_NEXT_LCSSA]], i3* [[P1:%.*]], align 1, !dbg [[DBG581:![0-9]+]]
+; NOLZCNT-NEXT: store i3 [[X_CURR_LCSSA]], ptr [[P0:%.*]], align 1, !dbg [[DBG580:![0-9]+]]
+; NOLZCNT-NEXT: store i3 [[X_NEXT_LCSSA]], ptr [[P1:%.*]], align 1, !dbg [[DBG581:![0-9]+]]
; NOLZCNT-NEXT: ret void, !dbg [[DBG582:![0-9]+]]
;
entry:
br i1 %x.curr.isbitunset, label %loop, label %end
end:
- store i3 %x.curr, i3* %p0
- store i3 %x.next, i3* %p1
+ store i3 %x.curr, ptr %p0
+ store i3 %x.next, ptr %p1
ret void
}
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[I_0_014]] unordered, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i8, ptr [[I_0_014]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load atomic i8, i8* %I.0.014 unordered, align 1
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load atomic i8, ptr %I.0.014 unordered, align 1
+ store atomic i8 %V, ptr %DestI unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load i8, i8* %I.0.014, align 1
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load i8, ptr %I.0.014, align 1
+ store atomic i8 %V, ptr %DestI unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load i8, i8* %I.0.014
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load i8, ptr %I.0.014
+ store atomic i8 %V, ptr %DestI unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 2
-; CHECK-NEXT: store atomic i32 [[V]], i32* [[DESTI]] unordered, align 4
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[I_0_014]], align 2
+; CHECK-NEXT: store atomic i32 [[V]], ptr [[DESTI]] unordered, align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
- %DestI = getelementptr i32, i32* %Dest, i64 %indvar
- %V = load i32, i32* %I.0.014, align 2
- store atomic i32 %V, i32* %DestI unordered, align 4
+ %I.0.014 = getelementptr i32, ptr %Base, i64 %indvar
+ %DestI = getelementptr i32, ptr %Dest, i64 %indvar
+ %V = load i32, ptr %I.0.014, align 2
+ store atomic i32 %V, ptr %DestI unordered, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 4
-; CHECK-NEXT: store atomic i32 [[V]], i32* [[DESTI]] unordered, align 2
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[I_0_014]], align 4
+; CHECK-NEXT: store atomic i32 [[V]], ptr [[DESTI]] unordered, align 2
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
- %DestI = getelementptr i32, i32* %Dest, i64 %indvar
- %V = load i32, i32* %I.0.014, align 4
- store atomic i32 %V, i32* %DestI unordered, align 2
+ %I.0.014 = getelementptr i32, ptr %Base, i64 %indvar
+ %DestI = getelementptr i32, ptr %Dest, i64 %indvar
+ %V = load i32, ptr %I.0.014, align 4
+ store atomic i32 %V, ptr %DestI unordered, align 2
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[I_0_014]] unordered, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i8, ptr [[I_0_014]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load atomic i8, i8* %I.0.014 unordered, align 1
- store i8 %V, i8* %DestI, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load atomic i8, ptr %I.0.014 unordered, align 1
+ store i8 %V, ptr %DestI, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i32 1)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[I_0_014]] unordered, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i8, ptr [[I_0_014]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load atomic i8, i8* %I.0.014 unordered, align 1
- store i8 %V, i8* %DestI
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load atomic i8, ptr %I.0.014 unordered, align 1
+ store i8 %V, ptr %DestI
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[I_0_014]] unordered, align 2
-; CHECK-NEXT: store i32 [[V]], i32* [[DESTI]], align 4
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[I_0_014]] unordered, align 2
+; CHECK-NEXT: store i32 [[V]], ptr [[DESTI]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
- %DestI = getelementptr i32, i32* %Dest, i64 %indvar
- %V = load atomic i32, i32* %I.0.014 unordered, align 2
- store i32 %V, i32* %DestI, align 4
+ %I.0.014 = getelementptr i32, ptr %Base, i64 %indvar
+ %DestI = getelementptr i32, ptr %Dest, i64 %indvar
+ %V = load atomic i32, ptr %I.0.014 unordered, align 2
+ store i32 %V, ptr %DestI, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[I_0_014]] unordered, align 4
-; CHECK-NEXT: store i32 [[V]], i32* [[DESTI]], align 2
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[I_0_014]] unordered, align 4
+; CHECK-NEXT: store i32 [[V]], ptr [[DESTI]], align 2
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
- %DestI = getelementptr i32, i32* %Dest, i64 %indvar
- %V = load atomic i32, i32* %I.0.014 unordered, align 4
- store i32 %V, i32* %DestI, align 2
+ %I.0.014 = getelementptr i32, ptr %Base, i64 %indvar
+ %DestI = getelementptr i32, ptr %Dest, i64 %indvar
+ %V = load atomic i32, ptr %I.0.014 unordered, align 4
+ store i32 %V, ptr %DestI, align 2
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[I_0_014]] unordered, align 1
-; CHECK-NEXT: store atomic i8 [[V]], i8* [[DESTI]] monotonic, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i8, ptr [[I_0_014]] unordered, align 1
+; CHECK-NEXT: store atomic i8 [[V]], ptr [[DESTI]] monotonic, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load atomic i8, i8* %I.0.014 unordered, align 1
- store atomic i8 %V, i8* %DestI monotonic, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load atomic i8, ptr %I.0.014 unordered, align 1
+ store atomic i8 %V, ptr %DestI monotonic, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[I_0_014]] monotonic, align 1
-; CHECK-NEXT: store atomic i8 [[V]], i8* [[DESTI]] unordered, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i8, ptr [[I_0_014]] monotonic, align 1
+; CHECK-NEXT: store atomic i8 [[V]], ptr [[DESTI]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load atomic i8, i8* %I.0.014 monotonic, align 1
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load atomic i8, ptr %I.0.014 monotonic, align 1
+ store atomic i8 %V, ptr %DestI unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test6(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i16, i32 10000, align 2
-; CHECK-NEXT: [[BASE2:%.*]] = bitcast i16* [[BASE]] to i8*
; CHECK-NEXT: [[DEST:%.*]] = alloca i16, i32 10000, align 2
-; CHECK-NEXT: [[DEST1:%.*]] = bitcast i16* [[DEST]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 2 [[DEST1]], i8* align 2 [[BASE2]], i64 [[TMP0]], i32 2)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 2 [[DEST]], ptr align 2 [[BASE]], i64 [[TMP0]], i32 2)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, i16* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i16, i16* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i16, i16* [[I_0_014]] unordered, align 2
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i16, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i16, ptr [[I_0_014]] unordered, align 2
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
- %DestI = getelementptr i16, i16* %Dest, i64 %indvar
- %V = load atomic i16, i16* %I.0.014 unordered, align 2
- store atomic i16 %V, i16* %DestI unordered, align 2
+ %I.0.014 = getelementptr i16, ptr %Base, i64 %indvar
+ %DestI = getelementptr i16, ptr %Dest, i64 %indvar
+ %V = load atomic i16, ptr %I.0.014 unordered, align 2
+ store atomic i16 %V, ptr %DestI unordered, align 2
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test7(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i32, i32 10000, align 4
-; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE]] to i8*
; CHECK-NEXT: [[DEST:%.*]] = alloca i32, i32 10000, align 4
-; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 [[DEST1]], i8* align 4 [[BASE2]], i64 [[TMP0]], i32 4)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[BASE]], i64 [[TMP0]], i32 4)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[I_0_014]] unordered, align 4
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[I_0_014]] unordered, align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
- %DestI = getelementptr i32, i32* %Dest, i64 %indvar
- %V = load atomic i32, i32* %I.0.014 unordered, align 4
- store atomic i32 %V, i32* %DestI unordered, align 4
+ %I.0.014 = getelementptr i32, ptr %Base, i64 %indvar
+ %DestI = getelementptr i32, ptr %Dest, i64 %indvar
+ %V = load atomic i32, ptr %I.0.014 unordered, align 4
+ store atomic i32 %V, ptr %DestI unordered, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test8(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i64, i32 10000, align 8
-; CHECK-NEXT: [[BASE2:%.*]] = bitcast i64* [[BASE]] to i8*
; CHECK-NEXT: [[DEST:%.*]] = alloca i64, i32 10000, align 8
-; CHECK-NEXT: [[DEST1:%.*]] = bitcast i64* [[DEST]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 3
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 8 [[DEST1]], i8* align 8 [[BASE2]], i64 [[TMP0]], i32 8)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 8 [[DEST]], ptr align 8 [[BASE]], i64 [[TMP0]], i32 8)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i64, i64* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i64, i64* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i64, i64* [[I_0_014]] unordered, align 8
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i64, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i64, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i64, ptr [[I_0_014]] unordered, align 8
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i64, i64* %Base, i64 %indvar
- %DestI = getelementptr i64, i64* %Dest, i64 %indvar
- %V = load atomic i64, i64* %I.0.014 unordered, align 8
- store atomic i64 %V, i64* %DestI unordered, align 8
+ %I.0.014 = getelementptr i64, ptr %Base, i64 %indvar
+ %DestI = getelementptr i64, ptr %Dest, i64 %indvar
+ %V = load atomic i64, ptr %I.0.014 unordered, align 8
+ store atomic i64 %V, ptr %DestI unordered, align 8
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test9(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i128, i32 10000, align 8
-; CHECK-NEXT: [[BASE2:%.*]] = bitcast i128* [[BASE]] to i8*
; CHECK-NEXT: [[DEST:%.*]] = alloca i128, i32 10000, align 8
-; CHECK-NEXT: [[DEST1:%.*]] = bitcast i128* [[DEST]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 4
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[DEST1]], i8* align 16 [[BASE2]], i64 [[TMP0]], i32 16)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[DEST]], ptr align 16 [[BASE]], i64 [[TMP0]], i32 16)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i128, i128* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i128, i128* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i128, i128* [[I_0_014]] unordered, align 16
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i128, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i128, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i128, ptr [[I_0_014]] unordered, align 16
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i128, i128* %Base, i64 %indvar
- %DestI = getelementptr i128, i128* %Dest, i64 %indvar
- %V = load atomic i128, i128* %I.0.014 unordered, align 16
- store atomic i128 %V, i128* %DestI unordered, align 16
+ %I.0.014 = getelementptr i128, ptr %Base, i64 %indvar
+ %DestI = getelementptr i128, ptr %Dest, i64 %indvar
+ %V = load atomic i128, ptr %I.0.014 unordered, align 16
+ store atomic i128 %V, ptr %DestI unordered, align 16
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i256, i256* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i256, i256* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i256, i256* [[I_0_014]] unordered, align 32
-; CHECK-NEXT: store atomic i256 [[V]], i256* [[DESTI]] unordered, align 32
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i256, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i256, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i256, ptr [[I_0_014]] unordered, align 32
+; CHECK-NEXT: store atomic i256 [[V]], ptr [[DESTI]] unordered, align 32
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i256, i256* %Base, i64 %indvar
- %DestI = getelementptr i256, i256* %Dest, i64 %indvar
- %V = load atomic i256, i256* %I.0.014 unordered, align 32
- store atomic i256 %V, i256* %DestI unordered, align 32
+ %I.0.014 = getelementptr i256, ptr %Base, i64 %indvar
+ %DestI = getelementptr i256, ptr %Dest, i64 %indvar
+ %V = load atomic i256, ptr %I.0.014 unordered, align 32
+ store atomic i256 %V, ptr %DestI unordered, align 32
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; Make sure that atomic memset doesn't get recognized by mistake
-define void @test_nomemset(i8* %Base, i64 %Size) nounwind ssp {
+define void @test_nomemset(ptr %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test_nomemset(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: store atomic i8 0, i8* [[I_0_014]] unordered, align 1
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: store atomic i8 0, ptr [[I_0_014]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- store atomic i8 0, i8* %I.0.014 unordered, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ store atomic i8 0, ptr %I.0.014 unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; Verify that unordered memset_pattern isn't recognized.
; This is a replica of test11_pattern from basic.ll
-define void @test_nomemset_pattern(i32* nocapture %P) nounwind ssp {
+define void @test_nomemset_pattern(ptr nocapture %P) nounwind ssp {
; CHECK-LABEL: @test_nomemset_pattern(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: store atomic i32 1, i32* [[ARRAYIDX]] unordered, align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: store atomic i32 1, ptr [[ARRAYIDX]] unordered, align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32, i32* %P, i64 %indvar
- store atomic i32 1, i32* %arrayidx unordered, align 4
+ %arrayidx = getelementptr i32, ptr %P, i64 %indvar
+ store atomic i32 1, ptr %arrayidx unordered, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
br i1 %exitcond, label %for.end, label %for.body
; Make sure that atomic memcpy or memmove don't get recognized by mistake
; when looping with positive stride
-define void @test_no_memcpy_memmove1(i8* %Src, i64 %Size) {
+define void @test_no_memcpy_memmove1(ptr %Src, i64 %Size) {
; CHECK-LABEL: @test_no_memcpy_memmove1(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
-; CHECK-NEXT: store atomic i8 [[V]], i8* [[DESTI]] unordered, align 1
+; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[STEP]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[SRCI]], align 1
+; CHECK-NEXT: store atomic i8 [[V]], ptr [[DESTI]] unordered, align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
%Step = add nuw nsw i64 %indvar, 1
- %SrcI = getelementptr i8, i8* %Src, i64 %Step
- %DestI = getelementptr i8, i8* %Src, i64 %indvar
- %V = load i8, i8* %SrcI, align 1
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %SrcI = getelementptr i8, ptr %Src, i64 %Step
+ %DestI = getelementptr i8, ptr %Src, i64 %indvar
+ %V = load i8, ptr %SrcI, align 1
+ store atomic i8 %V, ptr %DestI unordered, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; Make sure that atomic memcpy or memmove don't get recognized by mistake
; when looping with negative stride
-define void @test_no_memcpy_memmove2(i8* %Src, i64 %Size) {
+define void @test_no_memcpy_memmove2(ptr %Src, i64 %Size) {
; CHECK-LABEL: @test_no_memcpy_memmove2(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[STEP]] = add nsw i64 [[INDVAR]], -1
-; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
-; CHECK-NEXT: store atomic i8 [[V]], i8* [[DESTI]] unordered, align 1
+; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[STEP]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[SRCI]], align 1
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDVAR]]
+; CHECK-NEXT: store atomic i8 [[V]], ptr [[DESTI]] unordered, align 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
for.body:                                         ; preds = %bb.nph, %for.body
%indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
%Step = add nsw i64 %indvar, -1
- %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
- %V = load i8, i8* %SrcI, align 1
- %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
- store atomic i8 %V, i8* %DestI unordered, align 1
+ %SrcI = getelementptr inbounds i8, ptr %Src, i64 %Step
+ %V = load i8, ptr %SrcI, align 1
+ %DestI = getelementptr inbounds i8, ptr %Src, i64 %indvar
+ store atomic i8 %V, ptr %DestI unordered, align 1
%exitcond = icmp sgt i64 %indvar, 1
br i1 %exitcond, label %for.body, label %for.end
target triple = "x86_64-apple-darwin10.0.0"
; Two dimensional nested loop should be promoted to one big memset.
-define void @test10(i8 addrspace(2)* %X) nounwind ssp {
+define void @test10(ptr addrspace(2) %X) nounwind ssp {
; CHECK-LABEL: @test10(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p2i8.i16(i8 addrspace(2)* align 1 [[X:%.*]], i8 0, i16 10000, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p2.i16(ptr addrspace(2) align 1 [[X:%.*]], i8 0, i16 10000, i1 false)
; CHECK-NEXT: br label [[BB_NPH:%.*]]
; CHECK: bb.nph:
; CHECK-NEXT: [[I_04:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[INC12:%.*]], [[FOR_INC10:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw i16 [[I_04]], 100
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8 addrspace(2)* [[X]], i16 [[TMP0]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(2) [[X]], i16 [[TMP0]]
; CHECK-NEXT: br label [[FOR_BODY5:%.*]]
; CHECK: for.body5:
; CHECK-NEXT: [[J_02:%.*]] = phi i16 [ 0, [[BB_NPH]] ], [ [[INC:%.*]], [[FOR_BODY5]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i16 [[I_04]], 100
; CHECK-NEXT: [[ADD:%.*]] = add nsw i16 [[J_02]], [[MUL]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8 addrspace(2)* [[X]], i16 [[ADD]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr addrspace(2) [[X]], i16 [[ADD]]
; CHECK-NEXT: [[INC]] = add nsw i16 [[J_02]], 1
; CHECK-NEXT: [[CMP4:%.*]] = icmp eq i16 [[INC]], 100
; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_INC10]], label [[FOR_BODY5]]
%j.02 = phi i16 [ 0, %bb.nph ], [ %inc, %for.body5 ]
%mul = mul nsw i16 %i.04, 100
%add = add nsw i16 %j.02, %mul
- %arrayidx = getelementptr inbounds i8, i8 addrspace(2)* %X, i16 %add
- store i8 0, i8 addrspace(2)* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(2) %X, i16 %add
+ store i8 0, ptr addrspace(2) %arrayidx, align 1
%inc = add nsw i16 %j.02, 1
%cmp4 = icmp eq i16 %inc, 100
br i1 %cmp4, label %for.inc10, label %for.body5
ret void
}
-define void @test11_pattern(i32 addrspace(2)* nocapture %P) nounwind ssp {
+define void @test11_pattern(ptr addrspace(2) nocapture %P) nounwind ssp {
; CHECK-LABEL: @test11_pattern(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32 addrspace(2)* [[P:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: store i32 1, i32 addrspace(2)* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr addrspace(2) [[P:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: store i32 1, ptr addrspace(2) [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32, i32 addrspace(2)* %P, i64 %indvar
- store i32 1, i32 addrspace(2)* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr addrspace(2) %P, i64 %indvar
+ store i32 1, ptr addrspace(2) %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NEXT: [[TMP5:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 4
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(2) @g_50, i32 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], 5
; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 [[IDXPROM5]]
-; CHECK-NEXT: store i32 [[TMP2]], i32 addrspace(2)* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(2) @g_50, i32 0, i64 [[IDXPROM5]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(2) [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[INC]] = add nsw i32 [[TMP5]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 2
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32 addrspace(2)* getelementptr inbounds ([7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 6), align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr addrspace(2) getelementptr inbounds ([7 x i32], ptr addrspace(2) @g_50, i32 0, i64 6), align 4
; CHECK-NEXT: ret i32 [[TMP8]]
;
%tmp5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%add = add nsw i32 %tmp5, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom
- %tmp2 = load i32, i32 addrspace(2)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [7 x i32], ptr addrspace(2) @g_50, i32 0, i64 %idxprom
+ %tmp2 = load i32, ptr addrspace(2) %arrayidx, align 4
%add4 = add nsw i32 %tmp5, 5
%idxprom5 = sext i32 %add4 to i64
- %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom5
- store i32 %tmp2, i32 addrspace(2)* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [7 x i32], ptr addrspace(2) @g_50, i32 0, i64 %idxprom5
+ store i32 %tmp2, ptr addrspace(2) %arrayidx6, align 4
%inc = add nsw i32 %tmp5, 1
%cmp = icmp slt i32 %inc, 2
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.inc
- %tmp8 = load i32, i32 addrspace(2)* getelementptr inbounds ([7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 6), align 4
+ %tmp8 = load i32, ptr addrspace(2) getelementptr inbounds ([7 x i32], ptr addrspace(2) @g_50, i32 0, i64 6), align 4
ret i32 %tmp8
}
; Don't crash inside DependenceAnalysis
; PR14219
-define void @test1(i64* %iwork, i64 %x) {
+define void @test1(ptr %iwork, i64 %x) {
bb0:
%mul116 = mul nsw i64 %x, %x
%incdec.ptr6.sum175 = add i64 42, %x
- %arrayidx135 = getelementptr inbounds i64, i64* %iwork, i64 %incdec.ptr6.sum175
+ %arrayidx135 = getelementptr inbounds i64, ptr %iwork, i64 %incdec.ptr6.sum175
br label %bb1
bb1:
%storemerge4226 = phi i64 [ 0, %bb0 ], [ %inc139, %bb1 ]
- store i64 1, i64* %arrayidx135, align 8
+ store i64 1, ptr %arrayidx135, align 8
%incdec.ptr6.sum176 = add i64 %mul116, %storemerge4226
- %arrayidx137 = getelementptr inbounds i64, i64* %iwork, i64 %incdec.ptr6.sum176
- store i64 1, i64* %arrayidx137, align 8
+ %arrayidx137 = getelementptr inbounds i64, ptr %iwork, i64 %incdec.ptr6.sum176
+ store i64 1, ptr %arrayidx137, align 8
%inc139 = add nsw i64 %storemerge4226, 1
%cmp131 = icmp sgt i64 %storemerge4226, 42
br i1 %cmp131, label %bb2, label %bb1
; to that should continue to read from the original compare.
; CHECK: %tobool.5 = icmp ne i32 %num, 0
-; CHECK: store i1 %tobool.5, i1* %ptr
+; CHECK: store i1 %tobool.5, ptr %ptr
-define internal fastcc i32 @num_bits_set(i32 %num, i1* %ptr) #1 {
+define internal fastcc i32 @num_bits_set(i32 %num, ptr %ptr) #1 {
entry:
%tobool.5 = icmp ne i32 %num, 0
- store i1 %tobool.5, i1* %ptr
+ store i1 %tobool.5, ptr %ptr
br i1 %tobool.5, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
target triple = "x86_64-apple-darwin10.0.0"
-define void @foo(double* nocapture %a) nounwind ssp !dbg !0 {
+define void @foo(ptr nocapture %a) nounwind ssp !dbg !0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
-; CHECK-NEXT: tail call void @llvm.dbg.value(metadata double* [[A:%.*]], metadata [[META7:![0-9]+]], metadata !DIExpression()), !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT: tail call void @llvm.dbg.value(metadata ptr [[A:%.*]], metadata [[META7:![0-9]+]], metadata !DIExpression()), !dbg [[DBG10:![0-9]+]]
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 0, metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG15:![0-9]+]]
-; CHECK-NEXT: [[A1:%.*]] = bitcast double* [[A]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[A1]], i8 0, i64 8000, i1 false), !dbg [[DBG16:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A]], i8 0, i64 8000, i1 false), !dbg [[DBG16:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, double* [[A]], i64 [[INDVAR]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[A]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVAR_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END:%.*]], !dbg [[DBG15]]
; CHECK-NEXT: ret void, !dbg [[DBG18:![0-9]+]]
;
entry:
- tail call void @llvm.dbg.value(metadata double* %a, metadata !5, metadata !DIExpression()), !dbg !8
+ tail call void @llvm.dbg.value(metadata ptr %a, metadata !5, metadata !DIExpression()), !dbg !8
tail call void @llvm.dbg.value(metadata i32 0, metadata !10, metadata !DIExpression()), !dbg !14
br label %for.body
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr double, double* %a, i64 %indvar
- store double 0.000000e+00, double* %arrayidx, align 8, !dbg !15
+ %arrayidx = getelementptr double, ptr %a, i64 %indvar
+ store double 0.000000e+00, ptr %arrayidx, align 8, !dbg !15
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, 1000
br i1 %exitcond, label %for.body, label %for.end, !dbg !14
; DIS-NONE-NEXT: bb.nph:
; DIS-NONE-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; DIS-NONE-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; DIS-NONE-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
+; DIS-NONE-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
; DIS-NONE-NEXT: br label [[FOR_BODY:%.*]]
; DIS-NONE: for.body:
; DIS-NONE-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-NONE-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; DIS-NONE-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; DIS-NONE-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
+; DIS-NONE-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; DIS-NONE-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; DIS-NONE-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
; DIS-NONE-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-NONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; DIS-NONE-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DIS-ALL-NEXT: br label [[FOR_BODY:%.*]]
; DIS-ALL: for.body:
; DIS-ALL-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-ALL-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; DIS-ALL-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; DIS-ALL-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
-; DIS-ALL-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
+; DIS-ALL-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; DIS-ALL-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; DIS-ALL-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
+; DIS-ALL-NEXT: store i8 [[V]], ptr [[DESTI]], align 1
; DIS-ALL-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-ALL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; DIS-ALL-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DIS-MEMCPY-NEXT: br label [[FOR_BODY:%.*]]
; DIS-MEMCPY: for.body:
; DIS-MEMCPY-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-MEMCPY-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; DIS-MEMCPY-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; DIS-MEMCPY-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
-; DIS-MEMCPY-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
+; DIS-MEMCPY-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; DIS-MEMCPY-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; DIS-MEMCPY-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
+; DIS-MEMCPY-NEXT: store i8 [[V]], ptr [[DESTI]], align 1
; DIS-MEMCPY-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-MEMCPY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; DIS-MEMCPY-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DIS-MEMSET-NEXT: bb.nph:
; DIS-MEMSET-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; DIS-MEMSET-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
-; DIS-MEMSET-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
+; DIS-MEMSET-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
; DIS-MEMSET-NEXT: br label [[FOR_BODY:%.*]]
; DIS-MEMSET: for.body:
; DIS-MEMSET-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-MEMSET-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
-; DIS-MEMSET-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
-; DIS-MEMSET-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
+; DIS-MEMSET-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
+; DIS-MEMSET-NEXT: [[DESTI:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[INDVAR]]
+; DIS-MEMSET-NEXT: [[V:%.*]] = load i8, ptr [[I_0_014]], align 1
; DIS-MEMSET-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-MEMSET-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; DIS-MEMSET-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- %DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load i8, i8* %I.0.014, align 1
- store i8 %V, i8* %DestI, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ %DestI = getelementptr i8, ptr %Dest, i64 %indvar
+ %V = load i8, ptr %I.0.014, align 1
+ store i8 %V, ptr %DestI, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
ret void
}
-define void @test-memset(i8* %Base, i64 %Size) nounwind ssp {
+define void @test-memset(ptr %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test-memset(
; CHECK-NEXT: bb.nph:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
;
; DIS-NONE-LABEL: @test-memset(
; DIS-NONE-NEXT: bb.nph:
-; DIS-NONE-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
+; DIS-NONE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; DIS-NONE-NEXT: br label [[FOR_BODY:%.*]]
; DIS-NONE: for.body:
; DIS-NONE-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-NONE-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
+; DIS-NONE-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
; DIS-NONE-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-NONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; DIS-NONE-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DIS-ALL-NEXT: br label [[FOR_BODY:%.*]]
; DIS-ALL: for.body:
; DIS-ALL-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-ALL-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
-; DIS-ALL-NEXT: store i8 0, i8* [[I_0_014]], align 1
+; DIS-ALL-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[INDVAR]]
+; DIS-ALL-NEXT: store i8 0, ptr [[I_0_014]], align 1
; DIS-ALL-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-ALL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; DIS-ALL-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
;
; DIS-MEMCPY-LABEL: @test-memset(
; DIS-MEMCPY-NEXT: bb.nph:
-; DIS-MEMCPY-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
+; DIS-MEMCPY-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; DIS-MEMCPY-NEXT: br label [[FOR_BODY:%.*]]
; DIS-MEMCPY: for.body:
; DIS-MEMCPY-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-MEMCPY-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
+; DIS-MEMCPY-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[INDVAR]]
; DIS-MEMCPY-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-MEMCPY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; DIS-MEMCPY-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; DIS-MEMSET-NEXT: br label [[FOR_BODY:%.*]]
; DIS-MEMSET: for.body:
; DIS-MEMSET-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; DIS-MEMSET-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
-; DIS-MEMSET-NEXT: store i8 0, i8* [[I_0_014]], align 1
+; DIS-MEMSET-NEXT: [[I_0_014:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[INDVAR]]
+; DIS-MEMSET-NEXT: store i8 0, ptr [[I_0_014]], align 1
; DIS-MEMSET-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; DIS-MEMSET-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; DIS-MEMSET-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
- store i8 0, i8* %I.0.014, align 1
+ %I.0.014 = getelementptr i8, ptr %Base, i64 %indvar
+ store i8 0, ptr %I.0.014, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; Make sure we do not delete instructions not inserted during expansion, e.g.
; because the expander re-used existing instructions.
-define void @test(i64 %init, float* %ptr) {
+define void @test(i64 %init, ptr %ptr) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: [[J_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[OUTER_LATCH:%.*]] ]
; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[OUTER_LATCH]] ]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds float, float* [[PTR:%.*]], i32 [[I_0]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds float, ptr [[PTR:%.*]], i32 [[I_0]]
; CHECK-NEXT: br label [[INNER:%.*]]
; CHECK: inner:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ [[INNER_IV_NEXT:%.*]], [[INNER]] ], [ [[INIT:%.*]], [[OUTER_HEADER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[PTR]], i64 [[INNER_IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[ARRAYIDX]] to i32*
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[ADD_PTR]], i64 [[INNER_IV]]
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX3]] to i32*
-; CHECK-NEXT: store i32 [[TMP1]], i32* [[TMP2]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[PTR]], i64 [[INNER_IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[ADD_PTR]], i64 [[INNER_IV]]
+; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[INNER_IV_NEXT]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EC_1]], label [[OUTER_LATCH]], label [[INNER]]
outer.header:
%j.0 = phi i32 [ 0, %entry ], [ %inc, %outer.latch ]
%i.0 = phi i32 [ 0, %entry ], [ %add, %outer.latch ]
- %add.ptr = getelementptr inbounds float, float* %ptr, i32 %i.0
+ %add.ptr = getelementptr inbounds float, ptr %ptr, i32 %i.0
br label %inner
inner:
%inner.iv = phi i64 [ %inner.iv.next, %inner ], [ %init, %outer.header ]
- %arrayidx = getelementptr inbounds float, float* %ptr, i64 %inner.iv
- %0 = bitcast float* %arrayidx to i32*
- %1 = load i32, i32* %0, align 4
- %arrayidx3 = getelementptr inbounds float, float* %add.ptr, i64 %inner.iv
- %2 = bitcast float* %arrayidx3 to i32*
- store i32 %1, i32* %2, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ptr, i64 %inner.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds float, ptr %add.ptr, i64 %inner.iv
+ store i32 %0, ptr %arrayidx3, align 4
%inner.iv.next = add nsw i64 %inner.iv, 1
%ec.1 = icmp eq i64 %inner.iv.next, 0
br i1 %ec.1, label %outer.latch, label %inner
; CHECK-LABEL: zero
; CHECK: llvm.memset
-define void @zero(float* %p, i64 %n) nounwind {
+define void @zero(ptr %p, i64 %n) nounwind {
bb7.lr.ph:
br label %bb7
bb7:
%i.02 = phi i64 [ 0, %bb7.lr.ph ], [ %tmp13, %bb7 ]
- %tmp10 = getelementptr inbounds float, float* %p, i64 %i.02
- store float 0.000000e+00, float* %tmp10, align 4
+ %tmp10 = getelementptr inbounds float, ptr %p, i64 %i.02
+ store float 0.000000e+00, ptr %tmp10, align 4
%tmp13 = add i64 %i.02, 1
%tmp6 = icmp ult i64 %tmp13, %n
br i1 %tmp6, label %bb7, label %bb14
; - a loop_memset idiom, or
; - a memset/memcpy idiom in a nested loop.
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
@APPLES = common global i32 0, align 4
@ORANGES = common global i32 0, align 4
for.body: ; preds = %for.body.preheader, %for.inc
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.inc ]
%BASKET.013 = phi i32 [ %BASKET.1, %for.inc ], [ 0, %for.body.preheader ]
- %arraydecay = getelementptr inbounds [2048 x i8], [2048 x i8]* %DST, i64 %indvars.iv, i64 0
- tail call void @llvm.memset.p0i8.i64(i8* %arraydecay, i8 -1, i64 2048, i1 false)
+ %arraydecay = getelementptr inbounds [2048 x i8], ptr %DST, i64 %indvars.iv, i64 0
+ tail call void @llvm.memset.p0.i64(ptr %arraydecay, i8 -1, i64 2048, i1 false)
%0 = trunc i64 %indvars.iv to i32
%rem11 = and i32 %0, 1
%cmp1 = icmp eq i32 %rem11, 0
- %1 = load i32, i32* @ORANGES, align 4
- %2 = load i32, i32* @APPLES, align 4
+ %1 = load i32, ptr @ORANGES, align 4
+ %2 = load i32, ptr @APPLES, align 4
br i1 %cmp1, label %if.then, label %if.else
if.else: ; preds = %for.body
%dec3 = add nsw i32 %2, -1
- store i32 %dec3, i32* @APPLES, align 4
+ store i32 %dec3, ptr @APPLES, align 4
br label %for.inc
if.then: ; preds = %for.body
%dec = add nsw i32 %1, -1
- store i32 %dec, i32* @ORANGES, align 4
+ store i32 %dec, ptr @ORANGES, align 4
br label %for.inc
for.inc: ; preds = %if.then, %if.else
for.body3: ; preds = %for.cond1.preheader, %for.inc
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
%BASKET.123 = phi i32 [ %BASKET.026, %for.cond1.preheader ], [ %BASKET.2, %for.inc ]
- %arrayidx5 = getelementptr inbounds [2046 x i8], [2046 x i8]* %DST, i64 %idxprom4, i64 %indvars.iv
- store i8 -1, i8* %arrayidx5, align 1
- %0 = load i32, i32* @APPLES, align 4
- %1 = load i32, i32* @ORANGES, align 4
+ %arrayidx5 = getelementptr inbounds [2046 x i8], ptr %DST, i64 %idxprom4, i64 %indvars.iv
+ store i8 -1, ptr %arrayidx5, align 1
+ %0 = load i32, ptr @APPLES, align 4
+ %1 = load i32, ptr @ORANGES, align 4
br i1 %cmp6, label %if.then, label %if.else
if.else: ; preds = %for.body3
%dec8 = add nsw i32 %0, -1
- store i32 %dec8, i32* @APPLES, align 4
+ store i32 %dec8, ptr @APPLES, align 4
br label %for.inc
if.then: ; preds = %for.body3
%dec = add nsw i32 %1, -1
- store i32 %dec, i32* @ORANGES, align 4
+ store i32 %dec, ptr @ORANGES, align 4
br label %for.inc
for.inc: ; preds = %if.then, %if.else
for.body: ; preds = %for.body.preheader, %for.inc
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.inc ]
%BASKET.013 = phi i32 [ %BASKET.1, %for.inc ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i8, i8* %DST, i64 %indvars.iv
- store i8 -1, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %DST, i64 %indvars.iv
+ store i8 -1, ptr %arrayidx, align 1
%0 = trunc i64 %indvars.iv to i32
%rem11 = and i32 %0, 1
%cmp1 = icmp eq i32 %rem11, 0
- %1 = load i32, i32* @ORANGES, align 4
- %2 = load i32, i32* @APPLES, align 4
+ %1 = load i32, ptr @ORANGES, align 4
+ %2 = load i32, ptr @APPLES, align 4
br i1 %cmp1, label %if.then, label %if.else
if.else: ; preds = %for.body
%dec3 = add nsw i32 %2, -1
- store i32 %dec3, i32* @APPLES, align 4
+ store i32 %dec3, ptr @APPLES, align 4
br label %for.inc
if.then: ; preds = %for.body
%dec = add nsw i32 %1, -1
- store i32 %dec, i32* @ORANGES, align 4
+ store i32 %dec, ptr @ORANGES, align 4
br label %for.inc
for.inc: ; preds = %if.then, %if.else
; }
; Function Attrs: nofree nounwind uwtable mustprogress
-define dso_local i32 @copy_noalias(%struct.S* noalias nocapture %a, %struct.S* nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
+define dso_local i32 @copy_noalias(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
; CHECK-LABEL: @copy_noalias(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[I_08]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[B:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[A:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast %struct.S* [[ARRAYIDX]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(12) [[TMP0]], i8* nonnull align 4 dereferenceable(12) [[TMP1]], i64 12, i1 false)
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[B:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[A:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i64(ptr nonnull align 4 dereferenceable(12) [[ARRAYIDX2]], ptr nonnull align 4 dereferenceable(12) [[ARRAYIDX]], i64 12, i1 false)
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%i.08 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%idxprom = zext i32 %i.08 to i64
- %arrayidx = getelementptr inbounds %struct.S, %struct.S* %b, i64 %idxprom
- %arrayidx2 = getelementptr inbounds %struct.S, %struct.S* %a, i64 %idxprom
- %0 = bitcast %struct.S* %arrayidx2 to i8*
- %1 = bitcast %struct.S* %arrayidx to i8*
- call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(12) %0, i8* nonnull align 4 dereferenceable(12) %1, i64 12, i1 false)
+ %arrayidx = getelementptr inbounds %struct.S, ptr %b, i64 %idxprom
+ %arrayidx2 = getelementptr inbounds %struct.S, ptr %a, i64 %idxprom
+ call void @llvm.memcpy.inline.p0.p0.i64(ptr nonnull align 4 dereferenceable(12) %arrayidx2, ptr nonnull align 4 dereferenceable(12) %arrayidx, i64 12, i1 false)
%inc = add nuw nsw i32 %i.08, 1
%cmp = icmp slt i32 %inc, %n
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #1
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-%class.SDUse = type { %class.SDValue, %class.SDUse**, %class.SDUse* }
+%class.SDUse = type { %class.SDValue, ptr, ptr }
%class.SDValue = type { i32, i32, i32 }
declare dso_local i32 @__gxx_personality_v0(...)
; Function Attrs: uwtable mustprogress
-define linkonce_odr dso_local %class.SDValue* @_ZNSt20__uninitialized_copyILb0EE13__uninit_copyIP5SDUseP7SDValueEET0_T_S7_S6_(%class.SDUse* %__first, %class.SDUse* %__last, %class.SDValue* %__result) local_unnamed_addr #0 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define linkonce_odr dso_local ptr @_ZNSt20__uninitialized_copyILb0EE13__uninit_copyIP5SDUseP7SDValueEET0_T_S7_S6_(ptr %__first, ptr %__last, ptr %__result) local_unnamed_addr #0 align 2 personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @_ZNSt20__uninitialized_copyILb0EE13__uninit_copyIP5SDUseP7SDValueEET0_T_S7_S6_(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP_NOT15:%.*]] = icmp eq %class.SDUse* [[__FIRST:%.*]], [[__LAST:%.*]]
+; CHECK-NEXT: [[CMP_NOT15:%.*]] = icmp eq ptr [[__FIRST:%.*]], [[__LAST:%.*]]
; CHECK-NEXT: br i1 [[CMP_NOT15]], label [[FOR_END:%.*]], label [[FOR_INC_PREHEADER:%.*]]
; CHECK: for.inc.preheader:
; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.inc:
-; CHECK-NEXT: [[__CUR_017:%.*]] = phi %class.SDValue* [ [[INCDEC_PTR1:%.*]], [[FOR_INC]] ], [ [[__RESULT:%.*]], [[FOR_INC_PREHEADER]] ]
-; CHECK-NEXT: [[__FIRST_ADDR_016:%.*]] = phi %class.SDUse* [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ], [ [[__FIRST]], [[FOR_INC_PREHEADER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast %class.SDValue* [[__CUR_017]] to i8*
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast %class.SDUse* [[__FIRST_ADDR_016]] to i8*
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(12) [[TMP0]], i8* noundef nonnull align 8 dereferenceable(12) [[TMP1]], i64 12, i1 false)
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds [[CLASS_SDUSE:%.*]], %class.SDUse* [[__FIRST_ADDR_016]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds [[CLASS_SDVALUE:%.*]], %class.SDValue* [[__CUR_017]], i64 1
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq %class.SDUse* [[INCDEC_PTR]], [[__LAST]]
+; CHECK-NEXT: [[__CUR_017:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_INC]] ], [ [[__RESULT:%.*]], [[FOR_INC_PREHEADER]] ]
+; CHECK-NEXT: [[__FIRST_ADDR_016:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ], [ [[__FIRST]], [[FOR_INC_PREHEADER]] ]
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(12) [[__CUR_017]], ptr noundef nonnull align 8 dereferenceable(12) [[__FIRST_ADDR_016]], i64 12, i1 false)
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds [[CLASS_SDUSE:%.*]], ptr [[__FIRST_ADDR_016]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds [[CLASS_SDVALUE:%.*]], ptr [[__CUR_017]], i64 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[__LAST]]
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_INC]]
; CHECK: for.end.loopexit:
-; CHECK-NEXT: [[INCDEC_PTR1_LCSSA:%.*]] = phi %class.SDValue* [ [[INCDEC_PTR1]], [[FOR_INC]] ]
+; CHECK-NEXT: [[INCDEC_PTR1_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR1]], [[FOR_INC]] ]
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
-; CHECK-NEXT: [[__CUR_0_LCSSA:%.*]] = phi %class.SDValue* [ [[__RESULT]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR1_LCSSA]], [[FOR_END_LOOPEXIT]] ]
-; CHECK-NEXT: ret %class.SDValue* [[__CUR_0_LCSSA]]
+; CHECK-NEXT: [[__CUR_0_LCSSA:%.*]] = phi ptr [ [[__RESULT]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR1_LCSSA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT: ret ptr [[__CUR_0_LCSSA]]
;
; std::__uninitialized_copy<false>::__uninit_copy(SDUse* first, SDUse* last,
; SDValue* result): walks [__first, __last), memcpy'ing 12 bytes per element
; into the destination, and returns the destination pointer advanced past the
; last element (or %__result unchanged when the range is empty).
; NOTE(review): %class.SDUse = { %class.SDValue, ptr, ptr } (declared above),
; so the source stride is larger than the 12 bytes copied per iteration; the
; per-element memcpys are therefore not contiguous on the source side --
; presumably why the CHECK lines expect the loop to survive loop-idiom intact.
; Confirm against the file's RUN line.
entry:
- %cmp.not15 = icmp eq %class.SDUse* %__first, %__last
+ %cmp.not15 = icmp eq ptr %__first, %__last
 br i1 %cmp.not15, label %for.end, label %for.inc.preheader
for.inc.preheader: ; preds = %entry
 br label %for.inc
; Loop body: copy one 12-byte SDValue payload, then bump both the source
; (SDUse stride) and destination (SDValue stride) pointers.
for.inc: ; preds = %for.inc.preheader, %for.inc
- %__cur.017 = phi %class.SDValue* [ %incdec.ptr1, %for.inc ], [ %__result, %for.inc.preheader ]
- %__first.addr.016 = phi %class.SDUse* [ %incdec.ptr, %for.inc ], [ %__first, %for.inc.preheader ]
- %0 = bitcast %class.SDValue* %__cur.017 to i8*
- %1 = bitcast %class.SDUse* %__first.addr.016 to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(12) %0, i8* noundef nonnull align 8 dereferenceable(12) %1, i64 12, i1 false)
- %incdec.ptr = getelementptr inbounds %class.SDUse, %class.SDUse* %__first.addr.016, i64 1
- %incdec.ptr1 = getelementptr inbounds %class.SDValue, %class.SDValue* %__cur.017, i64 1
- %cmp.not = icmp eq %class.SDUse* %incdec.ptr, %__last
+ %__cur.017 = phi ptr [ %incdec.ptr1, %for.inc ], [ %__result, %for.inc.preheader ]
+ %__first.addr.016 = phi ptr [ %incdec.ptr, %for.inc ], [ %__first, %for.inc.preheader ]
+ tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(12) %__cur.017, ptr noundef nonnull align 8 dereferenceable(12) %__first.addr.016, i64 12, i1 false)
+ %incdec.ptr = getelementptr inbounds %class.SDUse, ptr %__first.addr.016, i64 1
+ %incdec.ptr1 = getelementptr inbounds %class.SDValue, ptr %__cur.017, i64 1
+ %cmp.not = icmp eq ptr %incdec.ptr, %__last
 br i1 %cmp.not, label %for.end.loopexit, label %for.inc
for.end.loopexit: ; preds = %for.inc
- %incdec.ptr1.lcssa = phi %class.SDValue* [ %incdec.ptr1, %for.inc ]
+ %incdec.ptr1.lcssa = phi ptr [ %incdec.ptr1, %for.inc ]
 br label %for.end
; Merge the empty-range result (%__result) with the loop-exit value.
for.end: ; preds = %for.end.loopexit, %entry
- %__cur.0.lcssa = phi %class.SDValue* [ %__result, %entry ], [ %incdec.ptr1.lcssa, %for.end.loopexit ]
- ret %class.SDValue* %__cur.0.lcssa
+ %__cur.0.lcssa = phi ptr [ %__result, %entry ], [ %incdec.ptr1.lcssa, %for.end.loopexit ]
+ ret ptr %__cur.0.lcssa
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
-define void @looper(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+define void @looper(ptr noalias nocapture readonly %M, ptr noalias nocapture %out) {
; CHECK-LABEL: @looper(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OUT:%.*]], ptr align 8 [[M:%.*]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !5
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !5
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
}
-define void @looperBadMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+define void @looperBadMerge(ptr noalias nocapture readonly %M, ptr noalias nocapture %out) {
; CHECK-LABEL: @looperBadMerge(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OUT:%.*]], ptr align 8 [[M:%.*]], i64 256, i1 false), !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !3
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !3
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
ret void
}
-define void @looperGoodMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+define void @looperGoodMerge(ptr noalias nocapture readonly %M, ptr noalias nocapture %out) {
; CHECK-LABEL: @looperGoodMerge(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OUT:%.*]], ptr align 8 [[M:%.*]], i64 256, i1 false)
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
ret void
}
-define void @looperConstantTBAAStruct(double* nocapture noalias %out, double* nocapture noalias %in) {
+define void @looperConstantTBAAStruct(ptr nocapture noalias %out, ptr nocapture noalias %in) {
; CHECK-LABEL: @looperConstantTBAAStruct(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[IN2:%.*]] = bitcast double* [[IN:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[IN2]], i64 32, i1 false), !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OUT:%.*]], ptr align 8 [[IN:%.*]], i64 32, i1 false), !tbaa [[TBAA5:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[IN]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA9:![0-9]+]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA9:![0-9]+]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 3
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %in, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !10
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !10
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !10
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !10
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 3
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
ret void
}
-define void @looperVarTBAAStruct(double* nocapture noalias %out, double* nocapture noalias %in, i64 %len) {
+define void @looperVarTBAAStruct(ptr nocapture noalias %out, ptr nocapture noalias %in, i64 %len) {
; CHECK-LABEL: @looperVarTBAAStruct(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[IN2:%.*]] = bitcast double* [[IN:%.*]] to i8*
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[LEN:%.*]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[UMAX]], 3
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[IN2]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OUT:%.*]], ptr align 8 [[IN:%.*]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[IN]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA9]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA9]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[INC]], [[LEN]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %in, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !10
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !10
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !10
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !10
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %inc, %len
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
; RUN: opt -passes=loop-idiom -S <%s | FileCheck %s
-define void @memcpy_fixed_vec(i64* noalias %a, i64* noalias %b) local_unnamed_addr #1 {
+define void @memcpy_fixed_vec(ptr noalias %a, ptr noalias %b) local_unnamed_addr #1 {
; CHECK-LABEL: @memcpy_fixed_vec(
; CHECK: entry:
; CHECK: memcpy
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i64, i64* %a, i64 %index
- %1 = bitcast i64* %0 to <2 x i64>*
- %wide.load = load <2 x i64>, <2 x i64>* %1, align 8
- %2 = getelementptr inbounds i64, i64* %b, i64 %index
- %3 = bitcast i64* %2 to <2 x i64>*
- store <2 x i64> %wide.load, <2 x i64>* %3, align 8
+ %0 = getelementptr inbounds i64, ptr %a, i64 %index
+ %wide.load = load <2 x i64>, ptr %0, align 8
+ %1 = getelementptr inbounds i64, ptr %b, i64 %index
+ store <2 x i64> %wide.load, ptr %1, align 8
%index.next = add nuw nsw i64 %index, 2
- %4 = icmp eq i64 %index.next, 1024
- br i1 %4, label %for.cond.cleanup, label %vector.body
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
for.cond.cleanup: ; preds = %vector.body
ret void
}
-define void @memcpy_scalable_vec(i64* noalias %a, i64* noalias %b) local_unnamed_addr #1 {
+define void @memcpy_scalable_vec(ptr noalias %a, ptr noalias %b) local_unnamed_addr #1 {
; CHECK-LABEL: @memcpy_scalable_vec(
; CHECK: entry:
; CHECK-NOT: memcpy
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %0 = bitcast i64* %a to <vscale x 2 x i64>*
- %1 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %0, i64 %index
- %wide.load = load <vscale x 2 x i64>, <vscale x 2 x i64>* %1, align 16
- %2 = bitcast i64* %b to <vscale x 2 x i64>*
- %3 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %2, i64 %index
- store <vscale x 2 x i64> %wide.load, <vscale x 2 x i64>* %3, align 16
+ %0 = getelementptr inbounds <vscale x 2 x i64>, ptr %a, i64 %index
+ %wide.load = load <vscale x 2 x i64>, ptr %0, align 16
+ %1 = getelementptr inbounds <vscale x 2 x i64>, ptr %b, i64 %index
+ store <vscale x 2 x i64> %wide.load, ptr %1, align 16
%index.next = add nuw nsw i64 %index, 1
- %4 = icmp eq i64 %index.next, 1024
- br i1 %4, label %for.cond.cleanup, label %vector.body
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
for.cond.cleanup: ; preds = %vector.body
ret void
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-idiom < %s -S | FileCheck %s
-define void @copy_both_noalias(float* noalias nocapture %d, float* noalias nocapture readonly %s, i64 %sz) {
+define void @copy_both_noalias(ptr noalias nocapture %d, ptr noalias nocapture readonly %s, i64 %sz) {
; CHECK-LABEL: @copy_both_noalias(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D1:%.*]] = bitcast float* [[D:%.*]] to i8*
-; CHECK-NEXT: [[S2:%.*]] = bitcast float* [[S:%.*]] to i8*
; CHECK-NEXT: [[EXITCOND_NOT1:%.*]] = icmp eq i64 [[SZ:%.*]], 0
; CHECK-NEXT: br i1 [[EXITCOND_NOT1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SZ]], 2
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[D1]], i8* align 4 [[S2]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[D:%.*]], ptr align 4 [[S:%.*]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_04:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[D_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[D]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[S_ADDR_02:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[S]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[S_ADDR_02]], i64 1
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[S_ADDR_02]], align 4
-; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds float, float* [[D_ADDR_03]], i64 1
+; CHECK-NEXT: [[D_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[D]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[S_ADDR_02:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[S]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[S_ADDR_02]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[S_ADDR_02]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds float, ptr [[D_ADDR_03]], i64 1
; CHECK-NEXT: [[INC]] = add i64 [[I_04]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[SZ]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body: ; preds = %for.body.preheader, %for.body
%i.04 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %d.addr.03 = phi float* [ %incdec.ptr1, %for.body ], [ %d, %for.body.preheader ]
- %s.addr.02 = phi float* [ %incdec.ptr, %for.body ], [ %s, %for.body.preheader ]
- %incdec.ptr = getelementptr inbounds float, float* %s.addr.02, i64 1
- %0 = load float, float* %s.addr.02, align 4
- %incdec.ptr1 = getelementptr inbounds float, float* %d.addr.03, i64 1
- store float %0, float* %d.addr.03, align 4
+ %d.addr.03 = phi ptr [ %incdec.ptr1, %for.body ], [ %d, %for.body.preheader ]
+ %s.addr.02 = phi ptr [ %incdec.ptr, %for.body ], [ %s, %for.body.preheader ]
+ %incdec.ptr = getelementptr inbounds float, ptr %s.addr.02, i64 1
+ %0 = load float, ptr %s.addr.02, align 4
+ %incdec.ptr1 = getelementptr inbounds float, ptr %d.addr.03, i64 1
+ store float %0, ptr %d.addr.03, align 4
%inc = add i64 %i.04, 1
%exitcond.not = icmp eq i64 %inc, %sz
br i1 %exitcond.not, label %for.end.loopexit, label %for.body
ret void
}
-define void @copy_one_noalias(float* nocapture %d, float* noalias nocapture readonly %s, i64 %sz) {
+define void @copy_one_noalias(ptr nocapture %d, ptr noalias nocapture readonly %s, i64 %sz) {
; CHECK-LABEL: @copy_one_noalias(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D1:%.*]] = bitcast float* [[D:%.*]] to i8*
-; CHECK-NEXT: [[S2:%.*]] = bitcast float* [[S:%.*]] to i8*
; CHECK-NEXT: [[EXITCOND_NOT1:%.*]] = icmp eq i64 [[SZ:%.*]], 0
; CHECK-NEXT: br i1 [[EXITCOND_NOT1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SZ]], 2
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[D1]], i8* align 4 [[S2]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[D:%.*]], ptr align 4 [[S:%.*]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_04:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[D_ADDR_03:%.*]] = phi float* [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[D]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[S_ADDR_02:%.*]] = phi float* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[S]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, float* [[S_ADDR_02]], i64 1
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[S_ADDR_02]], align 4
-; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds float, float* [[D_ADDR_03]], i64 1
+; CHECK-NEXT: [[D_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[D]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[S_ADDR_02:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[S]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[S_ADDR_02]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[S_ADDR_02]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds float, ptr [[D_ADDR_03]], i64 1
; CHECK-NEXT: [[INC]] = add i64 [[I_04]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[SZ]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body: ; preds = %for.body.preheader, %for.body
%i.04 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %d.addr.03 = phi float* [ %incdec.ptr1, %for.body ], [ %d, %for.body.preheader ]
- %s.addr.02 = phi float* [ %incdec.ptr, %for.body ], [ %s, %for.body.preheader ]
- %incdec.ptr = getelementptr inbounds float, float* %s.addr.02, i64 1
- %0 = load float, float* %s.addr.02, align 4
- %incdec.ptr1 = getelementptr inbounds float, float* %d.addr.03, i64 1
- store float %0, float* %d.addr.03, align 4
+ %d.addr.03 = phi ptr [ %incdec.ptr1, %for.body ], [ %d, %for.body.preheader ]
+ %s.addr.02 = phi ptr [ %incdec.ptr, %for.body ], [ %s, %for.body.preheader ]
+ %incdec.ptr = getelementptr inbounds float, ptr %s.addr.02, i64 1
+ %0 = load float, ptr %s.addr.02, align 4
+ %incdec.ptr1 = getelementptr inbounds float, ptr %d.addr.03, i64 1
+ store float %0, ptr %d.addr.03, align 4
%inc = add i64 %i.04, 1
%exitcond.not = icmp eq i64 %inc, %sz
br i1 %exitcond.not, label %for.end.loopexit, label %for.body
}
; PR44378
-define dso_local void @memcpy_loop(i8* noalias nocapture %p, i8* noalias nocapture readonly %q, i32 %n) {
+define dso_local void @memcpy_loop(ptr noalias nocapture %p, ptr noalias nocapture readonly %q, i32 %n) {
; CHECK-LABEL: @memcpy_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P:%.*]], ptr align 1 [[Q:%.*]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[P_ADDR_06:%.*]] = phi i8* [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[P]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[Q_ADDR_05:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[Q]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[Q_ADDR_05]], i64 1
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[Q_ADDR_05]], align 1
-; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, i8* [[P_ADDR_06]], i64 1
+; CHECK-NEXT: [[P_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[P]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[Q_ADDR_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[Q]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[Q_ADDR_05]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[Q_ADDR_05]], align 1
+; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[P_ADDR_06]], i64 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body:
%i.07 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %p.addr.06 = phi i8* [ %incdec.ptr1, %for.body ], [ %p, %entry ]
- %q.addr.05 = phi i8* [ %incdec.ptr, %for.body ], [ %q, %entry ]
- %incdec.ptr = getelementptr inbounds i8, i8* %q.addr.05, i64 1
- %0 = load i8, i8* %q.addr.05, align 1
- %incdec.ptr1 = getelementptr inbounds i8, i8* %p.addr.06, i64 1
- store i8 %0, i8* %p.addr.06, align 1
+ %p.addr.06 = phi ptr [ %incdec.ptr1, %for.body ], [ %p, %entry ]
+ %q.addr.05 = phi ptr [ %incdec.ptr, %for.body ], [ %q, %entry ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %q.addr.05, i64 1
+ %0 = load i8, ptr %q.addr.05, align 1
+ %incdec.ptr1 = getelementptr inbounds i8, ptr %p.addr.06, i64 1
+ store i8 %0, ptr %p.addr.06, align 1
%inc = add nuw nsw i32 %i.07, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
-define void @looper(double* nocapture %out) {
+define void @looper(ptr nocapture %out) {
; CHECK-LABEL: @looper(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* [[OUT]], i32 16
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, ptr [[OUT:%.*]], i32 16
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 [[OUT]], ptr align 8 [[M]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %M = getelementptr double, double* %out, i32 16
+ %M = getelementptr double, ptr %out, i32 16
br label %for.body4
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !5
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !5
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
}
-define void @looperBadMerge(double* nocapture %out) {
+define void @looperBadMerge(ptr nocapture %out) {
; CHECK-LABEL: @looperBadMerge(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* [[OUT]], i32 16
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, ptr [[OUT:%.*]], i32 16
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 [[OUT]], ptr align 8 [[M]], i64 256, i1 false), !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %M = getelementptr double, double* %out, i32 16
+ %M = getelementptr double, ptr %out, i32 16
br label %for.body4
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8, !tbaa !3
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8, !tbaa !3
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
ret void
}
-define void @looperGoodMerge(double* nocapture %out) {
+define void @looperGoodMerge(ptr nocapture %out) {
; CHECK-LABEL: @looperGoodMerge(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
-; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* [[OUT]], i32 16
-; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
-; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false)
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, ptr [[OUT:%.*]], i32 16
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 [[OUT]], ptr align 8 [[M]], i64 256, i1 false)
; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
; CHECK: for.body4:
; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
-; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 [[J_020]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %M = getelementptr double, double* %out, i32 16
+ %M = getelementptr double, ptr %out, i32 16
br label %for.body4
for.body4: ; preds = %for.cond1.preheader, %for.body4
%j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
- %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
- %a0 = load double, double* %arrayidx, align 8, !tbaa !5
- %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
- store double %a0, double* %arrayidx8, align 8
+ %arrayidx = getelementptr inbounds double, ptr %M, i64 %j.020
+ %a0 = load double, ptr %arrayidx, align 8, !tbaa !5
+ %arrayidx8 = getelementptr inbounds double, ptr %out, i64 %j.020
+ store double %a0, ptr %arrayidx8, align 8
%inc = add nuw nsw i64 %j.020, 1
%cmp2 = icmp ult i64 %j.020, 31
br i1 %cmp2, label %for.body4, label %for.cond.cleanup
; CHECK: loop-idiom Scanning: F[NonAffinePointer] Countable Loop %for.body
; CHECK-NEXT: Pointer is not affine, abort
-define void @MemsetSize_LoopVariant(i32* %ar, i32 %n, i32 %m) {
+define void @MemsetSize_LoopVariant(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_LoopVariant(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[MUL]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[AR:%.*]], i64 [[MUL]]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[I_02]], [[MUL3]]
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[ADD]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ADD_PTR]], i8 0, i64 [[ADD]], i1 false)
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
for.body: ; preds = %for.body.lr.ph, %for.inc
%i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
%mul = mul nsw i64 %i.02, %conv1
- %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
- %0 = bitcast i32* %add.ptr to i8*
+ %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
%add = add nsw i64 %i.02, %mul3
- call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %add, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %add, i1 false)
br label %for.inc
for.inc: ; preds = %for.body
; memset(arr, 0, m * sizeof(int));
; }
; }
-define void @MemsetSize_Stride_Mismatch(i32* %ar, i32 %n, i32 %m) {
+define void @MemsetSize_Stride_Mismatch(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_Stride_Mismatch(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[I_02]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[AR:%.*]], i64 [[I_02]]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
-; CHECK-NEXT: [[ADD_PTR2:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR]], i64 [[MUL]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[ADD_PTR2]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL4]], i1 false)
+; CHECK-NEXT: [[ADD_PTR2:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR]], i64 [[MUL]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ADD_PTR2]], i8 0, i64 [[MUL4]], i1 false)
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
for.body: ; preds = %for.body.lr.ph, %for.inc
%i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
- %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %i.02
+ %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %i.02
%mul = mul nsw i64 %i.02, %conv1
- %add.ptr2 = getelementptr inbounds i32, i32* %add.ptr, i64 %mul
- %0 = bitcast i32* %add.ptr2 to i8*
- call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul4, i1 false)
+ %add.ptr2 = getelementptr inbounds i32, ptr %add.ptr, i64 %mul
+ call void @llvm.memset.p0.i64(ptr align 4 %add.ptr2, i8 0, i64 %mul4, i1 false)
br label %for.inc
for.inc: ; preds = %for.body
ret void
}
-define void @NonZeroAddressSpace(i32 addrspace(2)* nocapture %ar, i64 %n, i64 %m) {
+define void @NonZeroAddressSpace(ptr addrspace(2) nocapture %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @NonZeroAddressSpace(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[M:%.*]], 2
; CHECK: for.cond1.preheader:
; CHECK-NEXT: [[I_017:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC5:%.*]], [[FOR_INC4:%.*]] ]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[M]], [[I_017]]
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32 addrspace(2)* [[AR:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = bitcast i32 addrspace(2)* [[SCEVGEP]] to i8 addrspace(2)*
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, ptr addrspace(2) [[AR:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[I_017]], [[M]]
-; CHECK-NEXT: call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 [[SCEVGEP1]], i8 0, i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p2.i64(ptr addrspace(2) align 4 [[SCEVGEP]], i8 0, i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_INC4]]
; CHECK: for.inc4:
; CHECK-NEXT: [[INC5]] = add nuw nsw i64 [[I_017]], 1
for.cond1.preheader: ; preds = %for.inc4, %entry
%i.017 = phi i64 [ 0, %entry ], [ %inc5, %for.inc4 ]
%1 = mul i64 %m, %i.017
- %scevgep = getelementptr i32, i32 addrspace(2)* %ar, i64 %1
- %scevgep1 = bitcast i32 addrspace(2)* %scevgep to i8 addrspace(2)*
+ %scevgep = getelementptr i32, ptr addrspace(2) %ar, i64 %1
%mul = mul nsw i64 %i.017, %m
- call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 %scevgep1, i8 0, i64 %0, i1 false)
+ call void @llvm.memset.p2.i64(ptr addrspace(2) align 4 %scevgep, i8 0, i64 %0, i1 false)
br label %for.inc4
for.inc4: ; preds = %for.cond1.preheader
; ar = ar + i;
; }
; }
-define void @NonAffinePointer(i32* %ar, i32 %n, i32 %m) {
+define void @NonAffinePointer(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @NonAffinePointer(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT: [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[AR_ADDR_03:%.*]] = phi i32* [ [[AR:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[AR_ADDR_03:%.*]] = phi ptr [ [[AR:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[MUL]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL3]], i1 false)
-; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[I_02]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[AR_ADDR_03]], i64 [[MUL]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ADD_PTR]], i8 0, i64 [[MUL3]], i1 false)
+; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i32, ptr [[AR_ADDR_03]], i64 [[I_02]]
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_02]], 1
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.inc
- %ar.addr.03 = phi i32* [ %ar, %for.body.lr.ph ], [ %add.ptr4, %for.inc ]
+ %ar.addr.03 = phi ptr [ %ar, %for.body.lr.ph ], [ %add.ptr4, %for.inc ]
%i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
%mul = mul nsw i64 %i.02, %conv1
- %add.ptr = getelementptr inbounds i32, i32* %ar.addr.03, i64 %mul
- %0 = bitcast i32* %add.ptr to i8*
- call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul3, i1 false)
- %add.ptr4 = getelementptr inbounds i32, i32* %ar.addr.03, i64 %i.02
+ %add.ptr = getelementptr inbounds i32, ptr %ar.addr.03, i64 %mul
+ call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul3, i1 false)
+ %add.ptr4 = getelementptr inbounds i32, ptr %ar.addr.03, i64 %i.02
br label %for.inc
for.inc: ; preds = %for.body
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.memset.p2i8.i64(i8 addrspace(2)* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p2.i64(ptr addrspace(2) nocapture writeonly, i8, i64, i1 immarg)
; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
-define dso_local void @double_memset(i8* nocapture %p) {
+define dso_local void @double_memset(ptr nocapture %p) {
; CHECK-LABEL: @double_memset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[I_07]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
for.body:
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
- store i8 0, i8* %ptr1, align 1, !tbaa !5
+ %ptr1 = getelementptr inbounds i8, ptr %p, i64 %i.07
+ store i8 0, ptr %ptr1, align 1, !tbaa !5
%inc = add nuw nsw i64 %i.07, 1
%exitcond.not = icmp eq i64 %inc, 16
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define dso_local void @struct_memset(i8* nocapture %p) {
+define dso_local void @struct_memset(ptr nocapture %p) {
; CHECK-LABEL: @struct_memset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[I_07]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
for.body:
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
- store i8 0, i8* %ptr1, align 1, !tbaa !10
+ %ptr1 = getelementptr inbounds i8, ptr %p, i64 %i.07
+ store i8 0, ptr %ptr1, align 1, !tbaa !10
%inc = add nuw nsw i64 %i.07, 1
%exitcond.not = icmp eq i64 %inc, 16
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-define dso_local void @var_memset(i8* nocapture %p, i64 %len) {
+define dso_local void @var_memset(ptr nocapture %p, i64 %len) {
; CHECK-LABEL: @var_memset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 0, i64 [[LEN:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[PTR1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[I_07]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[LEN]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
for.body:
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
- store i8 0, i8* %ptr1, align 1, !tbaa !10
+ %ptr1 = getelementptr inbounds i8, ptr %p, i64 %i.07
+ store i8 0, ptr %ptr1, align 1, !tbaa !10
%inc = add nuw nsw i64 %i.07, 1
%exitcond.not = icmp eq i64 %inc, %len
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
-%struct.A = type { i32*, %struct.B }
-%struct.B = type { i32* }
+%struct.A = type { ptr, %struct.B }
+%struct.B = type { ptr }
-define dso_local void @adjacent_store_memset(%struct.A* nocapture %a, i64 %len) {
+define dso_local void @adjacent_store_memset(ptr nocapture %a, i64 %len) {
; CHECK-LABEL: @adjacent_store_memset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A1:%.*]] = bitcast %struct.A* [[A:%.*]] to i8*
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[LEN:%.*]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[UMAX]], 4
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[A1]], i8 0, i64 [[TMP0]], i1 false), !tbaa [[TBAA8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A:%.*]], i8 0, i64 [[TMP0]], i1 false), !tbaa [[TBAA8:![0-9]+]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_09:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[A]], i64 [[I_09]], i32 0
-; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds [[STRUCT_A]], %struct.A* [[A]], i64 [[I_09]], i32 1, i32 0
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[A]], i64 [[I_09]], i32 0
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds [[STRUCT_A]], ptr [[A]], i64 [[I_09]], i32 1, i32 0
; CHECK-NEXT: [[INC]] = add i64 [[I_09]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], [[LEN]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
for.body:
%i.09 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %p = getelementptr inbounds %struct.A, %struct.A* %a, i64 %i.09, i32 0
- store i32* null, i32** %p, align 8, !tbaa !18
- %p2 = getelementptr inbounds %struct.A, %struct.A* %a, i64 %i.09, i32 1, i32 0
- store i32* null, i32** %p2, align 8, !tbaa !21
+ %p = getelementptr inbounds %struct.A, ptr %a, i64 %i.09, i32 0
+ store ptr null, ptr %p, align 8, !tbaa !18
+ %p2 = getelementptr inbounds %struct.A, ptr %a, i64 %i.09, i32 1, i32 0
+ store ptr null, ptr %p2, align 8, !tbaa !21
%inc = add i64 %i.09, 1
%cmp = icmp ult i64 %inc, %len
br i1 %cmp, label %for.body, label %for.cond.cleanup
; RUN: opt -passes=loop-idiom < %s -S | FileCheck %s
-define dso_local void @double_memset(i8* nocapture %p, i8* noalias nocapture %q, i32 %n) {
+define dso_local void @double_memset(ptr nocapture %p, ptr noalias nocapture %q, i32 %n) {
; CHECK-LABEL: @double_memset(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[Q:%.*]], i8 0, i64 [[TMP0]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[Q:%.*]], i8 0, i64 [[TMP0]], i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[P_ADDR_06:%.*]] = phi i8* [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[P]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[Q_ADDR_05:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[Q]], [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[Q_ADDR_05]], i64 1
-; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, i8* [[P_ADDR_06]], i64 1
+; CHECK-NEXT: [[P_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[P]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[Q_ADDR_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[Q]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[Q_ADDR_05]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[P_ADDR_06]], i64 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body:
%i.07 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %p.addr.06 = phi i8* [ %incdec.ptr1, %for.body ], [ %p, %entry ]
- %q.addr.05 = phi i8* [ %incdec.ptr, %for.body ], [ %q, %entry ]
- %incdec.ptr = getelementptr inbounds i8, i8* %q.addr.05, i64 1
- store i8 0, i8* %q.addr.05, align 1
- %incdec.ptr1 = getelementptr inbounds i8, i8* %p.addr.06, i64 1
- store i8 0, i8* %p.addr.06, align 1
+ %p.addr.06 = phi ptr [ %incdec.ptr1, %for.body ], [ %p, %entry ]
+ %q.addr.05 = phi ptr [ %incdec.ptr, %for.body ], [ %q, %entry ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %q.addr.05, i64 1
+ store i8 0, ptr %q.addr.05, align 1
+ %incdec.ptr1 = getelementptr inbounds i8, ptr %p.addr.06, i64 1
+ store i8 0, ptr %p.addr.06, align 1
%inc = add nuw nsw i32 %i.07, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; CHECK-LABEL: @memset(
; CHECK-NOT: llvm.memset
-define i8* @memset(i8* %b, i32 %c, i64 %len) nounwind uwtable ssp {
+define ptr @memset(ptr %b, i32 %c, i64 %len) nounwind uwtable ssp {
entry:
%cmp1 = icmp ult i64 0, %len
br i1 %cmp1, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%indvar = phi i64 [ 0, %for.body.lr.ph ], [ %indvar.next, %for.body ]
- %p.02 = getelementptr i8, i8* %b, i64 %indvar
- store i8 %conv6, i8* %p.02, align 1
+ %p.02 = getelementptr i8, ptr %b, i64 %indvar
+ store i8 %conv6, ptr %p.02, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %len
br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
- ret i8* %b
+ ret ptr %b
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @test(i32* %currMB) nounwind uwtable {
+define void @test(ptr %currMB) nounwind uwtable {
entry:
br i1 undef, label %start.exit, label %if.then.i
unreachable
start.exit: ; preds = %entry
- indirectbr i8* undef, [label %0, label %for.bodyprime]
+ indirectbr ptr undef, [label %0, label %for.bodyprime]
; <label>:0 ; preds = %start.exit
unreachable
for.bodyprime: ; preds = %for.bodyprime, %start.exit
%i.057375 = phi i32 [ 0, %start.exit ], [ %1, %for.bodyprime ]
- %arrayidx8prime = getelementptr inbounds i32, i32* %currMB, i32 %i.057375
- store i32 0, i32* %arrayidx8prime, align 4
+ %arrayidx8prime = getelementptr inbounds i32, ptr %currMB, i32 %i.057375
+ store i32 0, ptr %arrayidx8prime, align 4
%1 = add i32 %i.057375, 1
%cmp5prime = icmp slt i32 %1, 4
br i1 %cmp5prime, label %for.bodyprime, label %for.endprime
; LIR'ing stores of pointers with address space 3 is fine, since
; they're integral pointers.
-define void @f_0(i8 addrspace(3)** %ptr) {
+define void @f_0(ptr %ptr) {
; CHECK-LABEL: @f_0(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PTR1:%.*]] = bitcast i8 addrspace(3)** [[PTR:%.*]] to i8*
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[PTR1]], i8 0, i64 80000, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[PTR:%.*]], i8 0, i64 80000, i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8 addrspace(3)*, i8 addrspace(3)** [[PTR]], i64 [[INDVAR]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr addrspace(3), ptr [[PTR]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i8 addrspace(3)*, i8 addrspace(3)** %ptr, i64 %indvar
- store i8 addrspace(3)* null, i8 addrspace(3)** %arrayidx, align 4
+ %arrayidx = getelementptr ptr addrspace(3), ptr %ptr, i64 %indvar
+ store ptr addrspace(3) null, ptr %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
br i1 %exitcond, label %for.end, label %for.body
; they're non-integral pointers. NOTE: Zero is special value which
; can be converted, if we add said handling here, convert this test
; to use any non-null pointer.
-define void @f_1(i8 addrspace(4)** %ptr) {
+define void @f_1(ptr %ptr) {
; CHECK-LABEL: @f_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)** [[PTR:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: store i8 addrspace(4)* null, i8 addrspace(4)** [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr addrspace(4), ptr [[PTR:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: store ptr addrspace(4) null, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i8 addrspace(4)*, i8 addrspace(4)** %ptr, i64 %indvar
- store i8 addrspace(4)* null, i8 addrspace(4)** %arrayidx, align 4
+ %arrayidx = getelementptr ptr addrspace(4), ptr %ptr, i64 %indvar
+ store ptr addrspace(4) null, ptr %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
br i1 %exitcond, label %for.end, label %for.body
}
; Same as previous case, but vector of non-integral pointers
-define void @f_2(i8 addrspace(4)** %ptr) {
+define void @f_2(ptr %ptr) {
; CHECK-LABEL: @f_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)** [[PTR:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: [[ADDR:%.*]] = bitcast i8 addrspace(4)** [[ARRAYIDX]] to <2 x i8 addrspace(4)*>*
-; CHECK-NEXT: store <2 x i8 addrspace(4)*> zeroinitializer, <2 x i8 addrspace(4)*>* [[ADDR]], align 8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr addrspace(4), ptr [[PTR:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: store <2 x ptr addrspace(4)> zeroinitializer, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 2
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i8 addrspace(4)*, i8 addrspace(4)** %ptr, i64 %indvar
- %addr = bitcast i8 addrspace(4)** %arrayidx to <2 x i8 addrspace(4)*>*
- store <2 x i8 addrspace(4)*> zeroinitializer, <2 x i8 addrspace(4)*>* %addr, align 8
+ %arrayidx = getelementptr ptr addrspace(4), ptr %ptr, i64 %indvar
+ store <2 x ptr addrspace(4)> zeroinitializer, ptr %arrayidx, align 8
%indvar.next = add i64 %indvar, 2
%exitcond = icmp eq i64 %indvar.next, 10000
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: @test(
; CHECK-NOT: llvm.memset
-define void @test(%struct.bigBlock_t* %p) {
+define void @test(ptr %p) {
entry:
- %0 = getelementptr inbounds %struct.bigBlock_t, %struct.bigBlock_t* %p, i64 0, i32 0, i64 0, i64 0
br label %for.body
for.body: ; preds = %entry, %for.body
%index.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %dst.01 = phi float* [ %0, %entry ], [ %add.ptr2, %for.body ]
- %cast.i5 = bitcast float* %dst.01 to <4 x float>*
- store <4 x float> zeroinitializer, <4 x float>* %cast.i5, align 16, !nontemporal !0
- %add.ptr1 = getelementptr inbounds float, float* %dst.01, i64 4
- %cast.i = bitcast float* %add.ptr1 to <4 x float>*
- store <4 x float> zeroinitializer, <4 x float>* %cast.i, align 16, !nontemporal !0
- %add.ptr2 = getelementptr inbounds float, float* %dst.01, i64 8
+ %dst.01 = phi ptr [ %p, %entry ], [ %add.ptr2, %for.body ]
+ store <4 x float> zeroinitializer, ptr %dst.01, align 16, !nontemporal !0
+ %add.ptr1 = getelementptr inbounds float, ptr %dst.01, i64 4
+ store <4 x float> zeroinitializer, ptr %add.ptr1, align 16, !nontemporal !0
+ %add.ptr2 = getelementptr inbounds float, ptr %dst.01, i64 8
%add = add nuw nsw i32 %index.02, 32
%cmp = icmp ult i32 %add, 4096
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.body, %for.body.preheader
%indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %add.ptr3 = getelementptr inbounds i32, i32* null, i32 %indvars.iv
- %add.ptr4 = getelementptr inbounds i32, i32* %add.ptr3, i32 1
- %0 = load i32, i32* %add.ptr4, align 4
- store i32 %0, i32* %add.ptr3, align 4
+ %add.ptr3 = getelementptr inbounds i32, ptr null, i32 %indvars.iv
+ %add.ptr4 = getelementptr inbounds i32, ptr %add.ptr3, i32 1
+ %0 = load i32, ptr %add.ptr4, align 4
+ store i32 %0, ptr %add.ptr3, align 4
%indvars.iv.next = add nsw i32 %indvars.iv, 1
%exitcond = icmp ne i32 %indvars.iv.next, 6
br i1 %exitcond, label %for.body, label %for.body.preheader
}
; CHECK-LABEL: define void @test1(
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 null, i8* align 4 inttoptr (i64 4 to i8*), i64 24, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 null, ptr align 4 inttoptr (i64 4 to ptr), i64 24, i1 false)
; CHECK-NOT: store
define void @test1_no_null_opt() #0 {
for.body: ; preds = %for.body, %for.body.preheader
%indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %add.ptr3 = getelementptr inbounds i32, i32* null, i32 %indvars.iv
- %add.ptr4 = getelementptr inbounds i32, i32* %add.ptr3, i32 1
- %0 = load i32, i32* %add.ptr4, align 4
- store i32 %0, i32* %add.ptr3, align 4
+ %add.ptr3 = getelementptr inbounds i32, ptr null, i32 %indvars.iv
+ %add.ptr4 = getelementptr inbounds i32, ptr %add.ptr3, i32 1
+ %0 = load i32, ptr %add.ptr4, align 4
+ store i32 %0, ptr %add.ptr3, align 4
%indvars.iv.next = add nsw i32 %indvars.iv, 1
%exitcond = icmp ne i32 %indvars.iv.next, 6
br i1 %exitcond, label %for.body, label %for.body.preheader
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-idiom -S %s | FileCheck %s
-define void @reuse_cast_1(float** %ptr, i1 %c) {
+define void @reuse_cast_1(ptr %ptr, i1 %c) {
; CHECK-LABEL: @reuse_cast_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_0:%.*]]
; CHECK: loop.0:
-; CHECK-NEXT: [[TMP:%.*]] = load float*, float** [[PTR:%.*]], align 8
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[TMP]] to i8*
+; CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP_2_PREHEADER:%.*]], label [[LOOP_1_PREHEADER:%.*]]
; CHECK: loop.1.preheader:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP2]], i8 0, i64 400, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP]], i8 0, i64 400, i1 false)
; CHECK-NEXT: br label [[LOOP_1:%.*]]
; CHECK: loop.2.preheader:
; CHECK-NEXT: br label [[LOOP_2:%.*]]
; CHECK: loop.1:
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_1_NEXT:%.*]], [[LOOP_1]] ], [ 0, [[LOOP_1_PREHEADER]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP]], i64 [[IV_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP]], i64 [[IV_1]]
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; CHECK-NEXT: [[C_1:%.*]] = icmp ne i64 [[IV_1_NEXT]], 100
; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_1]], label [[LOOP_0_LATCH_LOOPEXIT1:%.*]]
; CHECK: loop.2:
; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_2_PREHEADER]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, float* [[TMP]], i64 [[IV_2]]
-; CHECK-NEXT: store float 0.000000e+00, float* [[TMP10]], align 4
-; CHECK-NEXT: [[TMP11:%.*]] = load float*, float** [[PTR]], align 8
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IV_2]]
-; CHECK-NEXT: [[TMP13:%.*]] = load float, float* [[TMP12]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP]], i64 [[IV_2]]
+; CHECK-NEXT: store float 0.000000e+00, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[PTR]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IV_2]]
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 1
; CHECK-NEXT: [[C_2:%.*]] = icmp ne i64 [[IV_2_NEXT]], 100
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_2]], label [[LOOP_0_LATCH_LOOPEXIT:%.*]]
br label %loop.0
loop.0: ; preds = %loop.0.latch, %entry
- %tmp = load float*, float** %ptr, align 8
+ %tmp = load ptr, ptr %ptr, align 8
br i1 %c, label %loop.2, label %loop.1
loop.1: ; preds = %loop.1, %loop.0
%iv.1 = phi i64 [ %iv.1.next, %loop.1 ], [ 0, %loop.0 ]
- %tmp4 = getelementptr inbounds float, float* %tmp, i64 %iv.1
- store float 0.000000e+00, float* %tmp4, align 4
+ %tmp4 = getelementptr inbounds float, ptr %tmp, i64 %iv.1
+ store float 0.000000e+00, ptr %tmp4, align 4
%iv.1.next = add nuw nsw i64 %iv.1, 1
%c.1 = icmp ne i64 %iv.1.next, 100
br i1 %c.1, label %loop.1, label %loop.0.latch
loop.2: ; preds = %loop.2, %loop.0
%iv.2 = phi i64 [ %iv.2.next, %loop.2 ], [ 0, %loop.0 ]
- %tmp10 = getelementptr inbounds float, float* %tmp, i64 %iv.2
- store float 0.000000e+00, float* %tmp10, align 4
- %tmp11 = load float*, float** %ptr, align 8
- %tmp12 = getelementptr inbounds float, float* %tmp11, i64 %iv.2
- %tmp13 = load float, float* %tmp12, align 4
+ %tmp10 = getelementptr inbounds float, ptr %tmp, i64 %iv.2
+ store float 0.000000e+00, ptr %tmp10, align 4
+ %tmp11 = load ptr, ptr %ptr, align 8
+ %tmp12 = getelementptr inbounds float, ptr %tmp11, i64 %iv.2
+ %tmp13 = load float, ptr %tmp12, align 4
%iv.2.next = add nuw nsw i64 %iv.2, 1
%c.2 = icmp ne i64 %iv.2.next, 100
br i1 %c.2, label %loop.2, label %loop.0.latch
declare i1 @cond()
-declare void @use.i8(i8*)
+declare void @use.i8(ptr)
declare void @use.i1(i1)
-define void @reuse_cast_2(i32 %x, i32* %ptr.1.start) {
+define void @reuse_cast_2(i32 %x, ptr %ptr.1.start) {
; CHECK-LABEL: @reuse_cast_2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[PTR_1_START2:%.*]] = bitcast i32* [[PTR_1_START:%.*]] to i8*
; CHECK-NEXT: [[STACK:%.*]] = alloca [2 x i32], align 4
-; CHECK-NEXT: [[STACK1:%.*]] = bitcast [2 x i32]* [[STACK]] to i8*
; CHECK-NEXT: [[C_0:%.*]] = icmp sgt i32 [[X:%.*]], 0
-; CHECK-NEXT: [[CAST_TO_REUSE:%.*]] = bitcast [2 x i32]* [[STACK]] to i8*
-; CHECK-NEXT: [[PTR_2_START:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[STACK]], i64 0, i64 0
-; CHECK-NEXT: call void @use.i8(i8* [[CAST_TO_REUSE]])
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[STACK1]], i8* align 4 [[PTR_1_START2]], i64 8, i1 false)
+; CHECK-NEXT: call void @use.i8(ptr [[STACK]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[STACK]], ptr align 4 [[PTR_1_START:%.*]], i64 8, i1 false)
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_1:%.*]] = phi i32* [ [[PTR_1_START]], [[ENTRY]] ], [ [[PTR_1_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_2:%.*]] = phi i32* [ [[PTR_2_START]], [[ENTRY]] ], [ [[PTR_2_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_1_NEXT]] = getelementptr inbounds i32, i32* [[PTR_1]], i64 1
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[PTR_1]], align 4
-; CHECK-NEXT: [[PTR_2_NEXT]] = getelementptr inbounds i32, i32* [[PTR_2]], i64 1
+; CHECK-NEXT: [[PTR_1:%.*]] = phi ptr [ [[PTR_1_START]], [[ENTRY]] ], [ [[PTR_1_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_2:%.*]] = phi ptr [ [[STACK]], [[ENTRY]] ], [ [[PTR_2_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_1_NEXT]] = getelementptr inbounds i32, ptr [[PTR_1]], i64 1
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[PTR_1]], align 4
+; CHECK-NEXT: [[PTR_2_NEXT]] = getelementptr inbounds i32, ptr [[PTR_2]], i64 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i32 [[IV]], 0
; CHECK-NEXT: br i1 [[C_1]], label [[LOOP]], label [[EXIT:%.*]]
entry:
%stack = alloca [2 x i32], align 4
%c.0 = icmp sgt i32 %x, 0
- %cast.to.reuse = bitcast [2 x i32]* %stack to i8*
- %ptr.2.start = getelementptr inbounds [2 x i32], [2 x i32]* %stack, i64 0, i64 0
- call void @use.i8(i8* %cast.to.reuse)
+ call void @use.i8(ptr %stack)
br label %loop
loop: ; preds = %loop, %entry
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %ptr.1 = phi i32* [ %ptr.1.start, %entry ], [ %ptr.1.next, %loop ]
- %ptr.2 = phi i32* [ %ptr.2.start, %entry ], [ %ptr.2.next, %loop ]
- %ptr.1.next = getelementptr inbounds i32, i32* %ptr.1, i64 1
- %lv = load i32, i32* %ptr.1, align 4
- %ptr.2.next = getelementptr inbounds i32, i32* %ptr.2, i64 1
- store i32 %lv, i32* %ptr.2, align 4
+ %ptr.1 = phi ptr [ %ptr.1.start, %entry ], [ %ptr.1.next, %loop ]
+ %ptr.2 = phi ptr [ %stack, %entry ], [ %ptr.2.next, %loop ]
+ %ptr.1.next = getelementptr inbounds i32, ptr %ptr.1, i64 1
+ %lv = load i32, ptr %ptr.1, align 4
+ %ptr.2.next = getelementptr inbounds i32, ptr %ptr.2, i64 1
+ store i32 %lv, ptr %ptr.2, align 4
%iv.next = add nuw nsw i32 %iv, 1
%c.1 = icmp eq i32 %iv, 0
br i1 %c.1, label %loop, label %exit
for.cond: ; preds = %for.inc, %entry
%backslashes.0 = phi i32 [ undef, %entry ], [ %backslashes.2, %for.inc ]
- %p.0 = phi i8* [ undef, %entry ], [ %incdec.ptr3, %for.inc ]
- %q.0 = phi i8* [ undef, %entry ], [ %q.2, %for.inc ]
- %0 = load i8, i8* %p.0, align 1
+ %p.0 = phi ptr [ undef, %entry ], [ %incdec.ptr3, %for.inc ]
+ %q.0 = phi ptr [ undef, %entry ], [ %q.2, %for.inc ]
+ %0 = load i8, ptr %p.0, align 1
switch i8 %0, label %while.cond.preheader [
i8 0, label %for.cond4.preheader
i8 92, label %for.inc
br label %for.body6
while.body: ; preds = %while.body.lr.ph, %while.body
- %q.112 = phi i8* [ %q.0, %while.body.lr.ph ], [ %incdec.ptr, %while.body ]
+ %q.112 = phi ptr [ %q.0, %while.body.lr.ph ], [ %incdec.ptr, %while.body ]
%backslashes.111 = phi i32 [ %backslashes.0, %while.body.lr.ph ], [ %dec, %while.body ]
- %incdec.ptr = getelementptr inbounds i8, i8* %q.112, i64 1
- store i8 92, i8* %incdec.ptr, align 1
+ %incdec.ptr = getelementptr inbounds i8, ptr %q.112, i64 1
+ store i8 92, ptr %incdec.ptr, align 1
%dec = add nsw i32 %backslashes.111, -1
%tobool2 = icmp eq i32 %dec, 0
br i1 %tobool2, label %while.cond.for.inc.loopexit_crit_edge, label %while.body
while.cond.for.inc.loopexit_crit_edge: ; preds = %while.body
%scevgep.sum = add i64 %2, 1
- %scevgep13 = getelementptr i8, i8* %q.0, i64 %scevgep.sum
+ %scevgep13 = getelementptr i8, ptr %q.0, i64 %scevgep.sum
br label %for.inc.loopexit
for.inc.loopexit: ; preds = %while.cond.for.inc.loopexit_crit_edge, %while.cond.preheader
- %q.1.lcssa = phi i8* [ %scevgep13, %while.cond.for.inc.loopexit_crit_edge ], [ %q.0, %while.cond.preheader ]
+ %q.1.lcssa = phi ptr [ %scevgep13, %while.cond.for.inc.loopexit_crit_edge ], [ %q.0, %while.cond.preheader ]
br label %for.inc
for.inc: ; preds = %for.inc.loopexit, %for.cond
%backslashes.2 = phi i32 [ %backslashes.0, %for.cond ], [ 0, %for.inc.loopexit ]
- %q.2 = phi i8* [ %q.0, %for.cond ], [ %q.1.lcssa, %for.inc.loopexit ]
- %incdec.ptr3 = getelementptr inbounds i8, i8* %p.0, i64 1
+ %q.2 = phi ptr [ %q.0, %for.cond ], [ %q.1.lcssa, %for.inc.loopexit ]
+ %incdec.ptr3 = getelementptr inbounds i8, ptr %p.0, i64 1
br label %for.cond
for.body6: ; preds = %for.body6.lr.ph, %for.body6
- %q.39 = phi i8* [ %q.0, %for.body6.lr.ph ], [ %incdec.ptr7, %for.body6 ]
+ %q.39 = phi ptr [ %q.0, %for.body6.lr.ph ], [ %incdec.ptr7, %for.body6 ]
%backslashes.38 = phi i32 [ %backslashes.0, %for.body6.lr.ph ], [ %dec9, %for.body6 ]
- %incdec.ptr7 = getelementptr inbounds i8, i8* %q.39, i64 1
- store i8 92, i8* %incdec.ptr7, align 1
+ %incdec.ptr7 = getelementptr inbounds i8, ptr %q.39, i64 1
+ store i8 92, ptr %incdec.ptr7, align 1
%dec9 = add nsw i32 %backslashes.38, -1
%tobool5 = icmp eq i32 %dec9, 0
br i1 %tobool5, label %for.cond4.for.end10_crit_edge, label %for.body6
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, i16* [[BASE]], i64 [[INDVAR]]
-; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i16, i16* [[DEST]], i64 [[INDVAR]]
-; CHECK-NEXT: [[V:%.*]] = load atomic i16, i16* [[I_0_014]] unordered, align 2
-; CHECK-NEXT: store atomic i16 [[V]], i16* [[DESTI]] unordered, align 2
+; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, ptr [[BASE]], i64 [[INDVAR]]
+; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i16, ptr [[DEST]], i64 [[INDVAR]]
+; CHECK-NEXT: [[V:%.*]] = load atomic i16, ptr [[I_0_014]] unordered, align 2
+; CHECK-NEXT: store atomic i16 [[V]], ptr [[DESTI]] unordered, align 2
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
- %DestI = getelementptr i16, i16* %Dest, i64 %indvar
- %V = load atomic i16, i16* %I.0.014 unordered, align 2
- store atomic i16 %V, i16* %DestI unordered, align 2
+ %I.0.014 = getelementptr i16, ptr %Base, i64 %indvar
+ %DestI = getelementptr i16, ptr %Dest, i64 %indvar
+ %V = load atomic i16, ptr %I.0.014 unordered, align 2
+ store atomic i16 %V, ptr %DestI unordered, align 2
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
br i1 %exitcond, label %for.end, label %for.body
; f[i+1] = 0;
; }
;}
-define void @test(i32* %f, i32 %n) nounwind ssp {
+define void @test(ptr %f, i32 %n) nounwind ssp {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[N:%.*]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[TMP0]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 8
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[F1]], i8 0, i32 [[TMP4]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[F:%.*]], i8 0, i32 [[TMP4]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = or i32 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[F]], i32 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[TMP5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INDVARS_IV_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %f, i32 %indvars.iv
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %f, i32 %indvars.iv
+ store i32 0, ptr %arrayidx, align 4
%1 = or i32 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %f, i32 %1
- store i32 0, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %f, i32 %1
+ store i32 0, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i32 %indvars.iv, 2
%cmp = icmp ult i32 %indvars.iv.next, %0
br i1 %cmp, label %for.body, label %for.end.loopexit
; f[i+1] = 2;
; }
;}
-define void @test_pattern(i32* %f, i32 %n) nounwind ssp {
+define void @test_pattern(ptr %f, i32 %n) nounwind ssp {
; CHECK-LABEL: @test_pattern(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[N:%.*]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[MUL]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 8
-; CHECK-NEXT: call void @memset_pattern16(i8* [[F1]], i8* bitcast ([4 x i32]* @.memset_pattern to i8*), i32 [[TMP3]])
+; CHECK-NEXT: call void @memset_pattern16(ptr [[F:%.*]], ptr @.memset_pattern, i32 [[TMP3]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[X1:%.*]] = or i32 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[F]], i32 [[X1]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[X1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INDVARS_IV_NEXT]], [[MUL]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %f, i32 %indvars.iv
- store i32 2, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %f, i32 %indvars.iv
+ store i32 2, ptr %arrayidx, align 4
%x1 = or i32 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %f, i32 %x1
- store i32 2, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %f, i32 %x1
+ store i32 2, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i32 %indvars.iv, 2
%cmp = icmp ult i32 %indvars.iv.next, %mul
br i1 %cmp, label %for.body, label %for.end.loopexit
; f[i+1] = 0;
; }
;}
-define void @test(i32* %f, i32 %n) nounwind ssp {
+define void @test(ptr %f, i32 %n) nounwind ssp {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[N:%.*]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[MUL]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], 8
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[F1]], i8 0, i64 [[TMP4]], i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[F:%.*]], i8 0, i64 [[TMP4]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv
+ store i32 0, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %f, i64 %1
- store i32 0, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %f, i64 %1
+ store i32 0, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, %0
br i1 %cmp, label %for.body, label %for.end.loopexit
; f[i+1] = 2;
; }
;}
-define void @test_pattern(i32* %f, i32 %n) nounwind ssp {
+define void @test_pattern(ptr %f, i32 %n) nounwind ssp {
; CHECK-LABEL: @test_pattern(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[N:%.*]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[MUL]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], 8
-; CHECK-NEXT: call void @memset_pattern16(i8* [[F1]], i8* bitcast ([4 x i32]* @.memset_pattern to i8*), i64 [[TMP4]])
+; CHECK-NEXT: call void @memset_pattern16(ptr [[F:%.*]], ptr @.memset_pattern, i64 [[TMP4]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[TMP5]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
- store i32 2, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv
+ store i32 2, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %f, i64 %1
- store i32 2, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %f, i64 %1
+ store i32 2, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, %0
br i1 %cmp, label %for.body, label %for.end.loopexit
define void @e() local_unnamed_addr {
; CHECK-LABEL: @e(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[D0:%.*]] = load i32, i32* @a, align 4
-; CHECK-NEXT: [[D1:%.*]] = load i32, i32* @b, align 4
+; CHECK-NEXT: [[D0:%.*]] = load i32, ptr @a, align 4
+; CHECK-NEXT: [[D1:%.*]] = load i32, ptr @b, align 4
; CHECK-NEXT: br label [[FOR_COND1THREAD_PRE_SPLIT:%.*]]
; CHECK: for.cond1thread-pre-split.loopexit:
; CHECK-NEXT: br label [[FOR_COND1THREAD_PRE_SPLIT]]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1THREAD_PRE_SPLIT]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY5]] ]
; CHECK-NEXT: [[DIVX:%.*]] = sext i32 [[DIV]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[DIVX]], [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x i8], [1 x i8]* @c, i64 0, i64 [[TMP0]]
-; CHECK-NEXT: store i8 0, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x i8], ptr @c, i64 0, i64 [[TMP0]]
+; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: br i1 [[TOBOOL4]], label [[FOR_COND1THREAD_PRE_SPLIT_LOOPEXIT:%.*]], label [[FOR_BODY5]]
;
entry:
- %d0 = load i32, i32* @a, align 4
- %d1 = load i32, i32* @b, align 4
+ %d0 = load i32, ptr @a, align 4
+ %d1 = load i32, ptr @b, align 4
br label %for.cond1thread-pre-split
for.cond1thread-pre-split: ; preds = %for.body5, %entry
%indvars.iv = phi i64 [ 0, %for.cond1thread-pre-split ], [ %indvars.iv.next, %for.body5 ]
%divx = sext i32 %div to i64
%0 = add nsw i64 %divx, %indvars.iv
- %arrayidx = getelementptr inbounds [1 x i8], [1 x i8]* @c, i64 0, i64 %0
- store i8 0, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds [1 x i8], ptr @c, i64 0, i64 %0
+ store i8 0, ptr %arrayidx, align 1
%indvars.iv.next = add nsw i64 %indvars.iv, 1
%1 = trunc i64 %indvars.iv.next to i32
%tobool4 = icmp eq i32 %1, 0
; The loop's trip count is depending on an unsafe operation
; udiv. SCEV expander hoists it out of the loop, so loop-idiom
; should check that the memset is not generated in this case.
-define void @f(i32 %a, i32 %b, i8* nocapture %x) local_unnamed_addr {
+define void @f(i32 %a, i32 %b, ptr nocapture %x) local_unnamed_addr {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: br label [[FOR_BODY6:%.*]]
; CHECK: for.body6:
; CHECK-NEXT: [[I_09:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY6]] ], [ 0, [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X:%.*]], i64 [[I_09]]
-; CHECK-NEXT: store i8 0, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 [[I_09]]
+; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_09]], 1
; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_BODY6]], label [[FOR_BODY_LOOPEXIT:%.*]]
for.body6: ; preds = %for.body6, %for.body
%i.09 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %x, i64 %i.09
- store i8 0, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %x, i64 %i.09
+ store i8 0, ptr %arrayidx, align 1
%inc = add nuw nsw i64 %i.09, 1
%cmp3 = icmp slt i64 %inc, %conv
br i1 %cmp3, label %for.body6, label %for.body
declare void @ff()
-define void @test(i8* noalias nocapture %base, i64 %size) #1 {
+define void @test(ptr noalias nocapture %base, i64 %size) #1 {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: tail call void @ff()
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i8 0, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
tail call void @ff()
- %arrayidx = getelementptr inbounds i8, i8* %base, i64 %indvars.iv
- store i8 0, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %base, i64 %indvars.iv
+ store i8 0, ptr %arrayidx, align 1
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %size
br i1 %exitcond, label %for.body, label %for.end.loopexit
for2:
%indvars.iv = phi i64 [ %indvars.iv.next, %for2 ], [ 1, %for1.header ]
call void @foo(i64 %indvars.iv23)
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i32, ptr %arrayidx5
%add = add nsw i32 %lv, %k
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 99
br i1 %exitcond, label %for2.loopexit , label %for2
for2:
%indvars.iv = phi i64 [ %indvars.iv.next, %for2 ], [ 1, %for1.header ]
call void @bar(i64 %indvars.iv23)
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i32, ptr %arrayidx5
%add = add nsw i32 %lv, %k
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 99
br i1 %exitcond, label %for2.loopexit , label %for2
for.body4:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body4 ], [ 1, %for.cond1.preheader ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next, i64 %indvars.iv.next29
- %2 = load i32, i32* %arrayidx7
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv.next, i64 %indvars.iv.next29
+ %2 = load i32, ptr %arrayidx7
%add8 = add nsw i32 %2, %k
- store i32 %add8, i32* %arrayidx7
+ store i32 %add8, ptr %arrayidx7
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
br i1 %exitcond, label %for.cond.loopexit, label %for.body4
for2:
%j = phi i64 [ %j.next, %latch ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @C, i64 0, i64 %j, i64 %j23
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @C, i64 0, i64 %j, i64 %j23
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc10, label %latch
latch:
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
call void @llvm.dbg.value(metadata i64 %j, metadata !13, metadata !DIExpression()), !dbg !14
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %j23
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %j23
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
call void @llvm.dbg.value(metadata i64 %j, metadata !13, metadata !DIExpression()), !dbg !14
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %i
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %i
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, %i
br i1 %exitcond, label %for1.inc10, label %for2
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %i
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %i
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%0 = add nuw nsw i64 %j, %i
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %0, 100
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %i
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %i
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp ne i64 %i, %j
br i1 %exitcond, label %for2, label %for1.inc10
define void @interchange_04(i64 %k) {
entry:
- %0 = load i64, i64* @N, align 4
+ %0 = load i64, ptr @N, align 4
br label %for1.header
for1.header:
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %i
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %i
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp ne i64 %0, %j
br i1 %exitcond, label %for2, label %for1.inc10
for.body3.lr.ph: ; preds = %for.cond1.for.inc6_crit_edge, %entry
%indvars.iv18 = phi i64 [ %indvars.iv.next19, %for.cond1.for.inc6_crit_edge ], [ 1, %entry ]
- %X.promoted = load i32, i32* @X
+ %X.promoted = load i32, ptr @X
br label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
%add15 = phi i32 [ %X.promoted, %for.body3.lr.ph ], [ %add, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [500 x [500 x i32]], [500 x [500 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv18
- %0 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [500 x [500 x i32]], ptr @A, i64 0, i64 %indvars.iv, i64 %indvars.iv18
+ %0 = load i32, ptr %arrayidx5
%add = add nsw i32 %add15, %0
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.cond1.for.inc6_crit_edge: ; preds = %for.body3
%add.lcssa = phi i32 [ %add, %for.body3 ]
- store i32 %add.lcssa, i32* @X
+ store i32 %add.lcssa, ptr @X
%indvars.iv.next19 = add nuw nsw i64 %indvars.iv18, 1
%lftr.wideiv20 = trunc i64 %indvars.iv.next19 to i32
%exitcond21 = icmp eq i32 %lftr.wideiv20, %N
for.cond4.preheader.lr.ph: ; preds = %for.cond1.for.inc17_crit_edge, %entry
%indvars.iv41 = phi i64 [ %indvars.iv.next42, %for.cond1.for.inc17_crit_edge ], [ 1, %entry ]
- %Y.promoted = load i32, i32* @Y
+ %Y.promoted = load i32, ptr @Y
br label %for.body6.lr.ph
for.body6.lr.ph: ; preds = %for.cond4.for.end_crit_edge, %for.cond4.preheader.lr.ph
%indvars.iv37 = phi i64 [ 1, %for.cond4.preheader.lr.ph ], [ %indvars.iv.next38, %for.cond4.for.end_crit_edge ]
%add1334 = phi i32 [ %Y.promoted, %for.cond4.preheader.lr.ph ], [ %add13, %for.cond4.for.end_crit_edge ]
- %X.promoted = load i32, i32* @X
+ %X.promoted = load i32, ptr @X
br label %for.body6
for.body6: ; preds = %for.body6, %for.body6.lr.ph
%indvars.iv = phi i64 [ 1, %for.body6.lr.ph ], [ %indvars.iv.next, %for.body6 ]
- %arrayidx8 = getelementptr inbounds [500 x [500 x i32]], [500 x [500 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv37
- %0 = load i32, i32* %arrayidx8
+ %arrayidx8 = getelementptr inbounds [500 x [500 x i32]], ptr @A, i64 0, i64 %indvars.iv, i64 %indvars.iv37
+ %0 = load i32, ptr %arrayidx8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %N
br i1 %exitcond, label %for.cond4.for.end_crit_edge, label %for.body6
for.cond4.for.end_crit_edge: ; preds = %for.body6
- %arrayidx12 = getelementptr inbounds [500 x [500 x i32]], [500 x [500 x i32]]* @B, i64 0, i64 %indvars.iv37, i64 %indvars.iv41
- %1 = load i32, i32* %arrayidx12
+ %arrayidx12 = getelementptr inbounds [500 x [500 x i32]], ptr @B, i64 0, i64 %indvars.iv37, i64 %indvars.iv41
+ %1 = load i32, ptr %arrayidx12
%add13 = add nsw i32 %add1334, %1
%indvars.iv.next38 = add nuw nsw i64 %indvars.iv37, 1
%lftr.wideiv39 = trunc i64 %indvars.iv.next38 to i32
for.cond1.for.inc17_crit_edge: ; preds = %for.cond4.for.end_crit_edge
%add13.lcssa = phi i32 [ %add13, %for.cond4.for.end_crit_edge ]
- store i32 %add13.lcssa, i32* @Y
+ store i32 %add13.lcssa, ptr @Y
%indvars.iv.next42 = add nuw nsw i64 %indvars.iv41, 1
%lftr.wideiv43 = trunc i64 %indvars.iv.next42 to i32
%exitcond44 = icmp eq i32 %lftr.wideiv43, %N
; CHECK: Not interchanging loops. Cannot prove legality.
define void @innermost_latch_uses_values_in_middle_header() {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%b = add i32 80, 1
br label %outermost.header
br label %innermost.body
innermost.body: ; preds = %innermost.header
- %arrayidx9.i = getelementptr inbounds [1 x [6 x i32]], [1 x [6 x i32]]* @d, i64 0, i64 %indvar.innermost, i64 %indvar.middle
- store i32 0, i32* %arrayidx9.i, align 4
+ %arrayidx9.i = getelementptr inbounds [1 x [6 x i32]], ptr @d, i64 0, i64 %indvar.innermost, i64 %indvar.middle
+ store i32 0, ptr %arrayidx9.i, align 4
br label %innermost.latch
innermost.latch: ; preds = %innermost.body
for.body: ; preds = %for.cond.cleanup4, %entry
%indvars.iv45 = phi i64 [ 0, %entry ], [ %indvars.iv.next46, %for.cond.cleanup4 ]
%call = call double @fn1()
- %arrayidx = getelementptr inbounds [100 x double], [100 x double]* @T, i64 0, i64 %indvars.iv45
- store double %call, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds [100 x double], ptr @T, i64 0, i64 %indvars.iv45
+ store double %call, ptr %arrayidx, align 8
br label %for.cond6.preheader
for.cond6.preheader: ; preds = %for.cond.cleanup8, %for.body
br label %for.body9
for.cond.cleanup4: ; preds = %for.cond.cleanup8
- %tmp = load double, double* %arrayidx, align 8
+ %tmp = load double, ptr %arrayidx, align 8
call void @fn2(double %tmp)
%indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
%exitcond47 = icmp ne i64 %indvars.iv.next46, 100
for.body9: ; preds = %for.body9, %for.cond6.preheader
%indvars.iv = phi i64 [ 1, %for.cond6.preheader ], [ %indvars.iv.next, %for.body9 ]
- %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
- %tmp1 = load i32, i32* %arrayidx13, align 4
+ %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], ptr @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
+ %tmp1 = load i32, ptr %arrayidx13, align 4
%tmp2 = trunc i64 %indvars.iv45 to i32
%add = add nsw i32 %tmp1, %tmp2
- store i32 %add, i32* %arrayidx13, align 4
+ store i32 %add, ptr %arrayidx13, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 1000
br i1 %exitcond, label %for.body9, label %for.cond.cleanup8
; %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
; %tmp = trunc i64 %indvars.iv26 to i32
; store i32 %tmp, i32* %arrayidx6, align 4
- %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
+ %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], ptr @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
%tmp1 = trunc i64 %indvars.iv to i32
- store i32 %tmp1, i32* %arrayidx10, align 4
+ store i32 %tmp1, ptr %arrayidx10, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%n.wide = zext i32 %n to i64
%exitcond = icmp ne i64 %indvars.iv.next, %n.wide
; no_deps_interchange just accesses a single nested array and can be interchange.
; CHECK: Name: Interchanged
; CHECK-NEXT: Function: no_deps_interchange
-define i32 @no_deps_interchange([1024 x i32]* nocapture %Arr) local_unnamed_addr #0 {
+define i32 @no_deps_interchange(ptr nocapture %Arr) local_unnamed_addr #0 {
entry:
br label %for1.header
for2: ; preds = %for1.header, %for2
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next, %for2 ]
- %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* %Arr, i64 %indvars.iv, i64 %indvars.iv19
- store i32 0, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [1024 x i32], ptr %Arr, i64 %indvars.iv, i64 %indvars.iv19
+ store i32 0, ptr %arrayidx6, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 1024
br i1 %exitcond, label %for2, label %for1.inc
; No memory access using any induction variables, interchanging not beneficial.
; CHECK: Name: InterchangeNotProfitable
; CHECK-NEXT: Function: no_mem_instrs
-define i32 @no_mem_instrs(i64* %ptr) {
+define i32 @no_mem_instrs(ptr %ptr) {
entry:
br label %for1.header
for2: ; preds = %for1.header, %for2
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next, %for2 ]
- store i64 %indvars.iv, i64* %ptr, align 4
+ store i64 %indvars.iv, ptr %ptr, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 1024
br i1 %exitcond, label %for2, label %for1.inc
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY3_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: [[OR_REDUCTION_OUTER]] = phi i32 [ [[OR_LCSSA:%.*]], [[FOR_BODY3_SPLIT]] ], [ [[A]], [[FOR_BODY3_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.body3.split1:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i64 [[INDVAR0]], i64 [[INDEX]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i64 [[INDVAR0]], i64 [[INDEX]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR_REDUCTION_INNER]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVAR0_NEXT:%.*]] = add nsw i64 [[INDVAR0]], -1
; CHECK-NEXT: [[INDVAR1_NEXT:%.*]] = add nsw i32 [[INDVAR1]], -1
; CHECK-NEXT: br i1 [[TOBOOL]], label [[FOR_BODY3_SPLIT]], label [[FOR_BODY]]
; CHECK: for.cond.for.end8_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY3_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END8:%.*]]
; CHECK: for.end8:
; CHECK-NEXT: ret void
;
entry:
- %a = load i32, i32* @a
+ %a = load i32, ptr @a
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.inc7
%or.reduction.inner = phi i32 [ %or.reduction.outer, %for.body ], [ %or, %for.body3 ]
%indvar0 = phi i64 [ 5, %for.body ], [ %indvar0.next, %for.body3 ]
%indvar1 = phi i32 [ 5, %for.body ], [ %indvar1.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i64 %indvar0, i64 %index
- %load.val = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i64 %indvar0, i64 %index
+ %load.val = load i32, ptr %arrayidx5, align 4
%or = or i32 %or.reduction.inner, %load.val
%indvar0.next = add nsw i64 %indvar0, -1
%indvar1.next = add nsw i32 %indvar1, -1
for.cond.for.end8_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a
+ store i32 %or.lcssa.lcssa, ptr @a
br label %for.end8
for.end8: ; preds = %for.cond.for.end8_crit_edge, %entry
define void @test2() {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY3_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: [[OR_REDUCTION_OUTER]] = phi i32 [ [[OR_LCSSA:%.*]], [[FOR_BODY3_SPLIT]] ], [ [[A]], [[FOR_BODY3_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.body3.split1:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i64 [[INDVAR0]], i64 [[INDEX]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i64 [[INDVAR0]], i64 [[INDEX]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR_REDUCTION_INNER]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVAR0_NEXT:%.*]] = add nsw i64 [[INDVAR0]], -1
; CHECK-NEXT: [[INDVAR1_NEXT:%.*]] = add nsw i32 [[INDVAR1]], -2
; CHECK-NEXT: br i1 [[TOBOOL]], label [[FOR_BODY3_SPLIT]], label [[FOR_BODY]]
; CHECK: for.cond.for.end8_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY3_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END8:%.*]]
; CHECK: for.end8:
; CHECK-NEXT: ret void
;
entry:
- %a = load i32, i32* @a
+ %a = load i32, ptr @a
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.inc7
%or.reduction.inner = phi i32 [ %or.reduction.outer, %for.body ], [ %or, %for.body3 ]
%indvar0 = phi i64 [ 5, %for.body ], [ %indvar0.next, %for.body3 ]
%indvar1 = phi i32 [ 6, %for.body ], [ %indvar1.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i64 %indvar0, i64 %index
- %load.val = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i64 %indvar0, i64 %index
+ %load.val = load i32, ptr %arrayidx5, align 4
%or = or i32 %or.reduction.inner, %load.val
%indvar0.next = add nsw i64 %indvar0, -1
%indvar1.next = add nsw i32 %indvar1, -2
for.cond.for.end8_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a
+ store i32 %or.lcssa.lcssa, ptr @a
br label %for.end8
for.end8: ; preds = %for.cond.for.end8_crit_edge, %entry
define void @test3() {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY3_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: [[OR_REDUCTION_OUTER]] = phi i32 [ [[OR_LCSSA:%.*]], [[FOR_BODY3_SPLIT]] ], [ [[A]], [[FOR_BODY3_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.body3.split1:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i32 [[INDVAR0]], i64 [[INDEX]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i32 [[INDVAR0]], i64 [[INDEX]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR_REDUCTION_INNER]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVAR0_NEXT:%.*]] = add nsw i32 [[INDVAR0]], 1
; CHECK-NEXT: [[INDVAR1_NEXT:%.*]] = add nsw i32 [[INDVAR1]], -1
; CHECK-NEXT: br i1 [[TOBOOL]], label [[FOR_BODY3_SPLIT]], label [[FOR_BODY]]
; CHECK: for.cond.for.end8_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY3_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END8:%.*]]
; CHECK: for.end8:
; CHECK-NEXT: ret void
;
entry:
- %a = load i32, i32* @a
+ %a = load i32, ptr @a
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.inc7
%or.reduction.inner = phi i32 [ %or.reduction.outer, %for.body ], [ %or, %for.body3 ]
%indvar0 = phi i32 [ 5, %for.body ], [ %indvar0.next, %for.body3 ]
%indvar1 = phi i32 [ 49, %for.body ], [ %indvar1.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], [200 x [200 x i32]]* @b, i64 0, i32 %indvar0, i64 %index
- %load.val = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds [200 x [200 x i32]], ptr @b, i64 0, i32 %indvar0, i64 %index
+ %load.val = load i32, ptr %arrayidx5, align 4
%or = or i32 %or.reduction.inner, %load.val
%indvar0.next = add nsw i32 %indvar0, 1
%indvar1.next = add nsw i32 %indvar1, -1
for.cond.for.end8_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a
+ store i32 %or.lcssa.lcssa, ptr @a
br label %for.end8
for.end8: ; preds = %for.cond.for.end8_crit_edge, %entry
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY4_PREHEADER:%.*]]
; CHECK: for.cond2.preheader.preheader:
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER:%.*]]
; CHECK-NEXT: [[OR_REDUCTION]] = phi i32 [ [[OR_LCSSA:%.*]], [[FOR_BODY4_SPLIT]] ], [ [[A]], [[FOR_BODY4_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER_PREHEADER]]
; CHECK: for.body4.split1:
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 [[INDVARS_IV]], i64 [[INDEX]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 [[INDVARS_IV]], i64 [[INDEX]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR13]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[OUTER_COND]], label [[FOR_COND2_PREHEADER]], label [[FOR_BODY4_SPLIT]]
; CHECK: for.cond.for.end9_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY4_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END9:%.*]]
; CHECK: for.end9:
; CHECK-NEXT: ret void
entry:
- %a = load i32, i32* @a, align 4
+ %a = load i32, ptr @a, align 4
br label %for.cond2.preheader
for.cond2.preheader: ; preds = %entry, %for.inc7
for.body4: ; preds = %for.cond2.preheader, %for.body4
%indvars.iv = phi i64 [ 5, %for.cond2.preheader ], [ %indvars.iv.next, %for.body4 ]
%or13 = phi i32 [ %or.reduction, %for.cond2.preheader ], [ %or, %for.body4 ]
- %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 %indvars.iv, i64 %index
- %load.val = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 %indvars.iv, i64 %index
+ %load.val = load i32, ptr %arrayidx6, align 4
%or = or i32 %or13, %load.val
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool3 = icmp eq i64 %indvars.iv.next, 0
for.cond.for.end9_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a, align 4
+ store i32 %or.lcssa.lcssa, ptr @a, align 4
br label %for.end9
for.end9: ; preds = %for.cond.for.end9_crit_edge, %entry
define void @test2() {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY4_PREHEADER:%.*]]
; CHECK: for.cond2.preheader.preheader:
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER:%.*]]
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER_PREHEADER]]
; CHECK: for.body4.split1:
; CHECK-NEXT: [[INDEX1:%.*]] = add nsw i64 [[INDVARS_IV]], [[INDVAR1]]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 [[INDEX1]], i64 [[INDEX0]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 [[INDEX1]], i64 [[INDEX0]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR13]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[OUTER_COND]], label [[FOR_COND2_PREHEADER]], label [[FOR_BODY4_SPLIT]]
; CHECK: for.cond.for.end9_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY4_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END9:%.*]]
; CHECK: for.end9:
; CHECK-NEXT: ret void
;
entry:
- %a = load i32, i32* @a, align 4
+ %a = load i32, ptr @a, align 4
br label %for.cond2.preheader
for.cond2.preheader: ; preds = %entry, %for.inc7
%indvars.iv = phi i64 [ 5, %for.cond2.preheader ], [ %indvars.iv.next, %for.body4 ]
%or13 = phi i32 [ %or.reduction, %for.cond2.preheader ], [ %or, %for.body4 ]
%index1 = add nsw i64 %indvars.iv, %indvar1
- %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 %index1, i64 %index0
- %load.val = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 %index1, i64 %index0
+ %load.val = load i32, ptr %arrayidx6, align 4
%or = or i32 %or13, %load.val
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool3 = icmp eq i64 %indvars.iv.next, 0
for.cond.for.end9_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a, align 4
+ store i32 %or.lcssa.lcssa, ptr @a, align 4
br label %for.end9
for.end9: ; preds = %for.cond.for.end9_crit_edge, %entry
define void @test3() {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[A:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: br label [[FOR_BODY4_PREHEADER:%.*]]
; CHECK: for.cond2.preheader.preheader:
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER:%.*]]
; CHECK-NEXT: br label [[FOR_COND2_PREHEADER_PREHEADER]]
; CHECK: for.body4.split1:
; CHECK-NEXT: [[INDEX1:%.*]] = add nsw i64 [[INDVARS_IV]], [[INDVAR1]]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 [[INDEX1]], i64 [[INDEX0]]
-; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 [[INDEX1]], i64 [[INDEX0]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[OR13]], [[LOAD_VAL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[OUTER_COND]], label [[FOR_COND2_PREHEADER]], label [[FOR_BODY4_SPLIT]]
; CHECK: for.cond.for.end9_crit_edge:
; CHECK-NEXT: [[OR_LCSSA_LCSSA:%.*]] = phi i32 [ [[OR_LCSSA]], [[FOR_BODY4_SPLIT]] ]
-; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], i32* @a, align 4
+; CHECK-NEXT: store i32 [[OR_LCSSA_LCSSA]], ptr @a, align 4
; CHECK-NEXT: br label [[FOR_END9:%.*]]
; CHECK: for.end9:
; CHECK-NEXT: ret void
;
entry:
- %a = load i32, i32* @a, align 4
+ %a = load i32, ptr @a, align 4
br label %for.cond2.preheader
for.cond2.preheader: ; preds = %entry, %for.inc7
%indvars.iv = phi i64 [ 5, %for.cond2.preheader ], [ %indvars.iv.next, %for.body4 ]
%or13 = phi i32 [ %or.reduction, %for.cond2.preheader ], [ %or, %for.body4 ]
%index1 = add nsw i64 %indvars.iv, %indvar1
- %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], [200 x [100 x i32]]* @b, i64 0, i64 %index1, i64 %index0
- %load.val = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [200 x [100 x i32]], ptr @b, i64 0, i64 %index1, i64 %index0
+ %load.val = load i32, ptr %arrayidx6, align 4
%or = or i32 %or13, %load.val
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool3 = icmp eq i64 %indvars.iv.next, 0
for.cond.for.end9_crit_edge: ; preds = %for.inc7
%or.lcssa.lcssa = phi i32 [ %or.lcssa, %for.inc7 ]
- store i32 %or.lcssa.lcssa, i32* @a, align 4
+ store i32 %or.lcssa.lcssa, ptr @a, align 4
br label %for.end9
for.end9: ; preds = %for.cond.for.end9_crit_edge, %entry
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP0:%.*]], [[FOR2_SPLIT:%.*]] ], [ 0, [[FOR2_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR1_HEADER_PREHEADER]]
; CHECK: for2.split1:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 [[J]], i64 [[J23]]
-; CHECK-NEXT: [[LV:%.*]] = load i64, i64* [[ARRAYIDX5]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 [[J]], i64 [[J23]]
+; CHECK-NEXT: [[LV:%.*]] = load i64, ptr [[ARRAYIDX5]]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[LV]], [[K:%.*]]
-; CHECK-NEXT: store i64 [[ADD]], i64* [[ARRAYIDX5]]
+; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX5]]
; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[J]], 99
; CHECK-NEXT: br label [[FOR1_INC10]]
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %for1.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %j23
- %lv = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %j23
+ %lv = load i64, ptr %arrayidx5
%add = add nsw i64 %lv, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc10, label %for2
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP1:%.*]], [[FOR3_SPLIT:%.*]] ], [ 100, [[FOR3_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR1_HEADER_PREHEADER]]
; CHECK: for3.split1:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 [[J]], i64 [[J19]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX5]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 [[J]], i64 [[J19]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX5]]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP0]], [[K:%.*]]
-; CHECK-NEXT: store i64 [[ADD]], i64* [[ARRAYIDX5]]
+; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX5]]
; CHECK-NEXT: [[J_NEXT:%.*]] = add nsw i64 [[J]], -1
; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i64 [[J]], 0
; CHECK-NEXT: br label [[FOR1_INC10]]
for3:
%j = phi i64 [ 100, %for1.header ], [ %j.next, %for3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %j19
- %0 = load i64, i64* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %j19
+ %0 = load i64, ptr %arrayidx5
%add = add nsw i64 %0, %k
- store i64 %add, i64* %arrayidx5
+ store i64 %add, ptr %arrayidx5
%j.next = add nsw i64 %j, -1
%cmp2 = icmp sgt i64 %j, 0
br i1 %cmp2, label %for3, label %for1.inc10
; CHECK-NEXT: br label [[FOR1_HEADER_PREHEADER]]
; CHECK: for2.split1:
; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 [[J]], i64 [[J23]]
-; CHECK-NEXT: store i64 [[J]], i64* [[ARRAYIDX5]]
-; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 [[J]], i64 [[J_NEXT24]]
-; CHECK-NEXT: store i64 [[J23]], i64* [[ARRAYIDX10]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 [[J]], i64 [[J23]]
+; CHECK-NEXT: store i64 [[J]], ptr [[ARRAYIDX5]]
+; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 [[J]], i64 [[J_NEXT24]]
+; CHECK-NEXT: store i64 [[J23]], ptr [[ARRAYIDX10]]
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[J]], 99
; CHECK-NEXT: br label [[FOR1_INC10]]
; CHECK: for2.split:
for2:
%j = phi i64 [ %j.next, %for2 ], [ 1, %for1.header ]
%j.next = add nuw nsw i64 %j, 1
- %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %j23
- store i64 %j, i64* %arrayidx5
- %arrayidx10 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @A, i64 0, i64 %j, i64 %j.next24
- store i64 %j23, i64* %arrayidx10
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %j23
+ store i64 %j, ptr %arrayidx5
+ %arrayidx10 = getelementptr inbounds [100 x [100 x i64]], ptr @A, i64 0, i64 %j, i64 %j.next24
+ store i64 %j23, ptr %arrayidx10
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc10, label %for2
for.body6: ; preds = %for.body6, %for.cond4.preheader
%k.026 = phi i64 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
- %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i64 0, i64 %k.026, i64 %j.027, i64 %i.028
- %0 = load i32, i32* %arrayidx8
+ %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], ptr @D, i64 0, i64 %k.026, i64 %j.027, i64 %i.028
+ %0 = load i32, ptr %arrayidx8
%add = add nsw i32 %0, %t
- store i32 %add, i32* %arrayidx8
+ store i32 %add, ptr %arrayidx8
%inc = add nuw nsw i64 %k.026, 1
%exitcond = icmp eq i64 %inc, 100
br i1 %exitcond, label %for.inc12, label %for.body6
; CHECK-DELIN-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[INNER_PREHEADER]] ], [ [[TMP1:%.*]], [[INNER_FOR_BODY_SPLIT]] ]
; CHECK-DELIN-NEXT: br label [[OUTER_PREHEADER]]
; CHECK-DELIN: inner.for.body.split1:
-; CHECK-DELIN-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [16 x [16 x i32]], [16 x [16 x i32]]* [[TEMP]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV27]]
-; CHECK-DELIN-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
-; CHECK-DELIN-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [16 x [16 x i32]], [16 x [16 x i32]]* [[RES]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV27]]
-; CHECK-DELIN-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX8]], align 4
+; CHECK-DELIN-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [16 x [16 x i32]], ptr [[TEMP]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV27]]
+; CHECK-DELIN-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+; CHECK-DELIN-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [16 x [16 x i32]], ptr [[RES]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV27]]
+; CHECK-DELIN-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX8]], align 4
; CHECK-DELIN-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-DELIN-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-DELIN-NEXT: br label [[INNER_CRIT_EDGE:%.*]]
inner.for.body: ; preds = %inner.preheader, %inner.for.body
%indvars.iv = phi i64 [ 0, %inner.preheader ], [ %indvars.iv.next, %inner.for.body ]
- %arrayidx6 = getelementptr inbounds [16 x [16 x i32]], [16 x [16 x i32]]* %temp, i64 0, i64 %indvars.iv, i64 %indvars.iv27
- %0 = load i32, i32* %arrayidx6, align 4
- %arrayidx8 = getelementptr inbounds [16 x [16 x i32]], [16 x [16 x i32]]* %res, i64 0, i64 %indvars.iv, i64 %indvars.iv27
- store i32 %0, i32* %arrayidx8, align 4
+ %arrayidx6 = getelementptr inbounds [16 x [16 x i32]], ptr %temp, i64 0, i64 %indvars.iv, i64 %indvars.iv27
+ %0 = load i32, ptr %arrayidx6, align 4
+ %arrayidx8 = getelementptr inbounds [16 x [16 x i32]], ptr %res, i64 0, i64 %indvars.iv, i64 %indvars.iv27
+ store i32 %0, ptr %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %inner.for.body, label %inner.crit_edge
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ [[TMP0:%.*]], [[INNER_SPLIT:%.*]] ], [ 0, [[INNER_PREHEADER]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.split1:
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x [4 x [2 x i16]]], [4 x [4 x [2 x i16]]]* @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x [4 x [2 x i16]]], ptr @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]], i64 0
; CHECK-NEXT: [[INNER_IV_NEXT:%.*]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[C_1:%.*]] = icmp ne i64 [[INNER_IV_NEXT]], [[N_EXT]]
; CHECK-NEXT: br label [[OUTER_LATCH]]
inner: ; preds = %bb6, %bb4
%inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner ]
- %tmp8 = getelementptr inbounds [4 x [4 x [2 x i16]]], [4 x [4 x [2 x i16]]]* @global, i64 0, i64 %inner.iv, i64 %outer.iv, i64 0
+ %tmp8 = getelementptr inbounds [4 x [4 x [2 x i16]]], ptr @global, i64 0, i64 %inner.iv, i64 %outer.iv, i64 0
%inner.iv.next = add nsw i64 %inner.iv, 1
%c.1 = icmp ne i64 %inner.iv.next, %N.ext
br i1 %c.1, label %inner, label %outer.latch
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%iv.outer.next.lcssa = phi i64 [ %iv.outer.next, %outer.inc ]
- store i64 %iv.outer.next.lcssa, i64* @Y
+ store i64 %iv.outer.next.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit, %entry
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%iv.inner.end.lcssa = phi i64 [ %iv.inner.end, %outer.inc ]
- store i64 %iv.inner.end.lcssa, i64* @Y
+ store i64 %iv.inner.end.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit, %entry
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%iv.inner.lcssa.lcssa = phi i64 [ %iv.inner.lcssa, %outer.inc ]
- store i64 %iv.inner.lcssa.lcssa, i64* @Y
+ store i64 %iv.inner.lcssa.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
%float.inner = phi float [ %float.inner.next, %for.body3 ], [ %float.outer, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
%float.inner.next = fadd fast float %float.inner, 1.000000e+00
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%float.outer.lcssa = phi float [ %float.outer.next, %outer.inc ]
- store float %float.outer.lcssa, float* @F
+ store float %float.outer.lcssa, ptr @F
br label %for.end16
for.end16: ; preds = %for.exit
; REMARK: Interchanged
; REMARK-NEXT: lcssa_05
-define void @lcssa_05(i32* %ptr) {
+define void @lcssa_05(ptr %ptr) {
entry:
br label %outer.header
br i1 undef, label %bb2, label %bb3
bb2: ; preds = %for.body3
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
br label %bb3
bb3: ; preds = %bb2, %for.body3
%addp = phi i32 [ %add, %bb2 ], [ 0, %for.body3 ]
- store i32 %addp, i32* %ptr
+ store i32 %addp, ptr %ptr
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%iv.inner.lcssa.lcssa = phi i64 [ %iv.inner.lcssa, %outer.inc ]
- store i64 %iv.inner.lcssa.lcssa, i64* @Y
+ store i64 %iv.inner.lcssa.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit
; REMARK: UnsupportedExitPHI
; REMARK-NEXT: lcssa_06
-define void @lcssa_06(i64* %ptr, i32* %ptr1) {
+define void @lcssa_06(ptr %ptr, ptr %ptr1) {
entry:
br label %outer.header
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
- store i32 %add, i32* %ptr1
+ store i32 %add, ptr %ptr1
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.inc, label %for.body3
for.exit: ; preds = %outer.inc
%sv.lcssa = phi i64 [ %sv, %outer.inc ]
- store i64 %sv.lcssa, i64* @Y
+ store i64 %sv.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit
for.body3: ; preds = %for.body3, %outer.header
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %iv.inner, i64 %iv.outer
- %vC = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @C, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vC = load i32, ptr %arrayidx9
%add = add nsw i32 %vA, %vC
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
br i1 %exitcond, label %outer.bb, label %for.body3
for.exit: ; preds = %outer.inc
%iv.inner.lcssa.lcssa = phi i64 [ %iv.inner.lcssa, %outer.inc ]
- store i64 %iv.inner.lcssa.lcssa, i64* @Y
+ store i64 %iv.inner.lcssa.lcssa, ptr @Y
br label %for.end16
for.end16: ; preds = %for.exit
; is an lcssa phi node outside the loopnest.
; REMARK: Interchanged
; REMARK-NEXT: lcssa_08
-define i64 @lcssa_08([100 x [100 x i64]]* %Arr) {
+define i64 @lcssa_08(ptr %Arr) {
entry:
br label %for1.header
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
br i1 %exit1, label %for1.inc, label %for2
for.body3:
%indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
%2 = add nsw i64 %indvars.iv, -1
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %1, i64 %2
- %3 = load i32, i32* %arrayidx6
- %arrayidx10 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %indvars.iv34, i64 %indvars.iv
- %4 = load i32, i32* %arrayidx10
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %1, i64 %2
+ %3 = load i32, ptr %arrayidx6
+ %arrayidx10 = getelementptr inbounds [100 x [100 x i32]], ptr @B, i64 0, i64 %indvars.iv34, i64 %indvars.iv
+ %4 = load i32, ptr %arrayidx10
%add = add nsw i32 %4, %3
- store i32 %add, i32* %arrayidx6
+ store i32 %add, ptr %arrayidx6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
for.body4:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body4 ], [ 1, %for.cond1.preheader ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next, i64 %indvars.iv.next29
- %2 = load i32, i32* %arrayidx7
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv.next, i64 %indvars.iv.next29
+ %2 = load i32, ptr %arrayidx7
%add8 = add nsw i32 %2, %k
- store i32 %add8, i32* %arrayidx7
+ store i32 %add8, ptr %arrayidx7
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
br i1 %exitcond, label %for.cond.loopexit, label %for.body4
for.body4: ; preds = %for.body4, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body4 ]
- %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
+ %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], ptr @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
%tmp = trunc i64 %indvars.iv26 to i32
- store i32 %tmp, i32* %arrayidx6, align 4
- %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
+ store i32 %tmp, ptr %arrayidx6, align 4
+ %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], ptr @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
%tmp1 = trunc i64 %indvars.iv to i32
- store i32 %tmp1, i32* %arrayidx10, align 4
+ store i32 %tmp1, ptr %arrayidx10, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 3
br i1 %exitcond, label %for.body4, label %for.cond.loopexit
for.body:
%indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
%2 = add nsw i64 %indvars.iv32, %1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @C, i64 0, i64 %indvars.iv32
+ %arrayidx = getelementptr inbounds [100 x i32], ptr @C, i64 0, i64 %indvars.iv32
%3 = trunc i64 %2 to i32
- store i32 %3, i32* %arrayidx
+ store i32 %3, ptr %arrayidx
br label %for.body3
for.body3:
%indvars.iv = phi i64 [ 0, %for.body ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
- %4 = load i32, i32* %arrayidx7
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
+ %4 = load i32, ptr %arrayidx7
%add10 = add nsw i32 %3, %4
- store i32 %add10, i32* %arrayidx7
+ store i32 %add10, ptr %arrayidx7
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx5
%add6 = add nsw i32 %0, %k
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
- store i32 %add6, i32* %arrayidx11
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
+ store i32 %add6, ptr %arrayidx11
%exitcond = icmp eq i64 %indvars.iv.next, 99
br i1 %exitcond, label %for.inc12, label %for.body3
for.body6: ; preds = %for.body6, %for.cond4.preheader
%k.026 = phi i64 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
- %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i32 0, i64 %i.028, i64 %k.026, i64 %j.027
- %0 = load i32, i32* %arrayidx8
+ %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], ptr @D, i32 0, i64 %i.028, i64 %k.026, i64 %j.027
+ %0 = load i32, ptr %arrayidx8
%add = add nsw i32 %0, %t
- store i32 %add, i32* %arrayidx8
+ store i32 %add, ptr %arrayidx8
%inc = add nuw nsw i64 %k.026, 1
%exitcond = icmp eq i64 %inc, 100
br i1 %exitcond, label %for.inc12, label %for.body6
for.body:
%indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
%2 = add nsw i64 %indvars.iv32, %1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @B, i64 0, i64 %indvars.iv32
+ %arrayidx = getelementptr inbounds [100 x i32], ptr @B, i64 0, i64 %indvars.iv32
%3 = trunc i64 %2 to i32
- store i32 %3, i32* %arrayidx
+ store i32 %3, ptr %arrayidx
br label %for.body3
for.body3:
%indvars.iv = phi i64 [ 0, %for.body ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
- %4 = load i32, i32* %arrayidx7
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
+ %4 = load i32, ptr %arrayidx7
%add10 = add nsw i32 %3, %4
- store i32 %add10, i32* %arrayidx7
+ store i32 %add10, ptr %arrayidx7
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
for.body3:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 2, %for.body ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv24, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv24, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx5
%add = add nsw i32 %1, %k
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv to i32
%exitcond = icmp eq i32 %lftr.wideiv, %0
for2:
%j = phi i64 [ %j.next, %for2 ], [ 0, %preheader.j ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j, i64 %j23
- %lv = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j, i64 %j23
+ %lv = load i32, ptr %arrayidx5
%add = add nsw i32 %lv, %k
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%j.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc10, label %for2
;CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
;CHECK: for.body3.split1:
;CHECK-NEXT: [[TMP0:%.*]] = add nuw nsw i64 [[INDVARS_IV22]], 5
-;CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [3 x [5 x [8 x i16]]], [3 x [5 x [8 x i16]]]* @b, i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV]], i64 [[TMP0]]
-;CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* [[ARRAYIDX7]]
+;CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [3 x [5 x [8 x i16]]], ptr @b, i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV]], i64 [[TMP0]]
+;CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX7]]
;CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
-;CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* @a
+;CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @a
;CHECK-NEXT: [[TMP_OR:%.*]] = or i32 [[TMP2]], [[CONV]]
-;CHECK-NEXT: store i32 [[TMP_OR]], i32* @a
+;CHECK-NEXT: store i32 [[TMP_OR]], ptr @a
;CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
;CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 3
;CHECK-NEXT: br label [[FOR_INC8_LOOPEXIT:%.*]]
;CHECK-NEXT: [[EXITCOND25:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT23]], 3
;CHECK-NEXT: br i1 [[EXITCOND25]], label [[FOR_BODY]], label [[FOR_BODY3_SPLIT]]
;CHECK: for.end10:
-;CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* @a
+;CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @a
;CHECK-NEXT: ret void
entry:
for.body3: ; preds = %for.cond1.preheader, %for.body3
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%0 = add nuw nsw i64 %indvars.iv22, 5
- %arrayidx7 = getelementptr inbounds [3 x [5 x [8 x i16]]], [3 x [5 x [8 x i16]]]* @b, i64 0, i64 %indvars.iv, i64 %indvars.iv, i64 %0
- %1 = load i16, i16* %arrayidx7
+ %arrayidx7 = getelementptr inbounds [3 x [5 x [8 x i16]]], ptr @b, i64 0, i64 %indvars.iv, i64 %indvars.iv, i64 %0
+ %1 = load i16, ptr %arrayidx7
%conv = sext i16 %1 to i32
- %2 = load i32, i32* @a
+ %2 = load i32, ptr @a
%or = or i32 %2, %conv
- store i32 %or, i32* @a
+ store i32 %or, ptr @a
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, 3
br i1 %exitcond, label %for.body3, label %for.inc8.loopexit
br i1 %exitcond25, label %for.body, label %for.end10
for.end10: ; preds = %for.inc8
- %3 = load i32, i32* @a
+ %3 = load i32, ptr @a
ret void
}
; CHECK-NEXT: br label [[OUTERMOST_HEADER:%.*]]
; CHECK: outermost.header:
; CHECK-NEXT: [[INDVAR_OUTERMOST:%.*]] = phi i32 [ 10, [[ENTRY:%.*]] ], [ [[INDVAR_OUTERMOST_NEXT:%.*]], [[OUTERMOST_LATCH:%.*]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: [[TOBOOL71_I:%.*]] = icmp eq i32 [[TMP0]], 0
; CHECK-NEXT: br label [[INNERMOST_PREHEADER:%.*]]
; CHECK: middle.header.preheader:
; CHECK-NEXT: [[INDVAR_INNERMOST:%.*]] = phi i64 [ [[TMP1:%.*]], [[INNERMOST_BODY_SPLIT]] ], [ 4, [[INNERMOST_PREHEADER]] ]
; CHECK-NEXT: br label [[MIDDLE_HEADER_PREHEADER]]
; CHECK: innermost.body.split1:
-; CHECK-NEXT: [[ARRAYIDX9_I:%.*]] = getelementptr inbounds [1 x [6 x i32]], [1 x [6 x i32]]* @d, i64 0, i64 [[INDVAR_INNERMOST]], i64 [[INDVAR_MIDDLE]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX9_I]], align 4
+; CHECK-NEXT: [[ARRAYIDX9_I:%.*]] = getelementptr inbounds [1 x [6 x i32]], ptr @d, i64 0, i64 [[INDVAR_INNERMOST]], i64 [[INDVAR_MIDDLE]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX9_I]], align 4
; CHECK-NEXT: [[INDVAR_INNERMOST_NEXT:%.*]] = add nsw i64 [[INDVAR_INNERMOST]], -1
; CHECK-NEXT: [[TOBOOL5_I:%.*]] = icmp eq i64 [[INDVAR_INNERMOST_NEXT]], 0
; CHECK-NEXT: br label [[MIDDLE_LATCH_LOOPEXIT:%.*]]
outermost.header: ; preds = %outermost.latch, %entry
%indvar.outermost = phi i32 [ 10, %entry ], [ %indvar.outermost.next, %outermost.latch ]
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%tobool71.i = icmp eq i32 %0, 0
br label %middle.header
innermost.body: ; preds = %innermost.preheader, %innermost.body
%indvar.innermost = phi i64 [ %indvar.innermost.next, %innermost.body ], [ 4, %innermost.preheader ]
- %arrayidx9.i = getelementptr inbounds [1 x [6 x i32]], [1 x [6 x i32]]* @d, i64 0, i64 %indvar.innermost, i64 %indvar.middle
- store i32 0, i32* %arrayidx9.i, align 4
+ %arrayidx9.i = getelementptr inbounds [1 x [6 x i32]], ptr @d, i64 0, i64 %indvar.innermost, i64 %indvar.middle
+ store i32 0, ptr %arrayidx9.i, align 4
%indvar.innermost.next = add nsw i64 %indvar.innermost, -1
%tobool5.i = icmp eq i64 %indvar.innermost.next, 0
br i1 %tobool5.i, label %innermost.loopexit, label %innermost.body
br i1 %exitcond, label %outer.inc, label %for.body3
outer.inc: ; preds = %for.body3
- %arrayidx5 = getelementptr inbounds [500 x [500 x i32]], [500 x [500 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv18
- %0 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [500 x [500 x i32]], ptr @A, i64 0, i64 %indvars.iv, i64 %indvars.iv18
+ %0 = load i32, ptr %arrayidx5
%add = add nsw i32 %add15, %0
%indvars.iv.next19 = add nuw nsw i64 %indvars.iv18, 1
%lftr.wideiv20 = trunc i64 %indvars.iv.next19 to i32
; CHECK-LABEL: exit:
; CHECK-NEXT: %v4.lcssa = phi i64 [ %0, %inner.body.split ]
; CHECK-NEXT: %v8.lcssa.lcssa = phi i64 [ %[[IVNEXT]], %inner.body.split ]
-; CHECK-NEXT: store i64 %v8.lcssa.lcssa, i64* @b, align 4
-; CHECK-NEXT: store i64 %v4.lcssa, i64* @a, align 4
+; CHECK-NEXT: store i64 %v8.lcssa.lcssa, ptr @b, align 4
+; CHECK-NEXT: store i64 %v4.lcssa, ptr @a, align 4
entry:
br label %outer.header
inner.body: ; preds = %inner.body, %outer.header
%iv.inner = phi i64 [ 5, %outer.header ], [ %iv.inner.next, %inner.body ]
- %v7 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %iv.inner, i64 %iv.outer
- store i32 0, i32* %v7, align 4
+ %v7 = getelementptr inbounds [10 x [10 x i32]], ptr @c, i64 0, i64 %iv.inner, i64 %iv.outer
+ store i32 0, ptr %v7, align 4
%iv.inner.next = add nsw i64 %iv.inner, -1
%v9 = icmp eq i64 %iv.inner, 0
br i1 %v9, label %outer.latch, label %inner.body
exit: ; preds = %outer.latch
%v4.lcssa = phi i64 [ %iv.outer.next, %outer.latch ]
%v8.lcssa.lcssa = phi i64 [ %v8.lcssa, %outer.latch ]
- store i64 %v8.lcssa.lcssa, i64* @b, align 4
- store i64 %v4.lcssa, i64* @a, align 4
+ store i64 %v8.lcssa.lcssa, ptr @b, align 4
+ store i64 %v4.lcssa, ptr @a, align 4
ret void
}
; CHECK-LABEL: exit:
; CHECK-NEXT: %v4.lcssa = phi i64 [ %0, %inner.body.split ]
; CHECK-NEXT: %v8.lcssa.lcssa = phi i64 [ %iv.inner, %inner.body.split ]
-; CHECK-NEXT: store i64 %v8.lcssa.lcssa, i64* @b, align 4
-; CHECK-NEXT: store i64 %v4.lcssa, i64* @a, align 4
+; CHECK-NEXT: store i64 %v8.lcssa.lcssa, ptr @b, align 4
+; CHECK-NEXT: store i64 %v4.lcssa, ptr @a, align 4
entry:
br label %outer.header
inner.body: ; preds = %inner.body, %outer.header
%iv.inner = phi i64 [ 5, %outer.header ], [ %iv.inner.next, %inner.body ]
- %v7 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %iv.inner, i64 %iv.outer
- store i32 0, i32* %v7, align 4
+ %v7 = getelementptr inbounds [10 x [10 x i32]], ptr @c, i64 0, i64 %iv.inner, i64 %iv.outer
+ store i32 0, ptr %v7, align 4
%iv.inner.next = add nsw i64 %iv.inner, -1
%v9 = icmp eq i64 %iv.inner.next, 0
br i1 %v9, label %outer.latch, label %inner.body
exit: ; preds = %outer.latch
%v4.lcssa = phi i64 [ %iv.outer, %outer.latch ]
%v8.lcssa.lcssa = phi i64 [ %v8.lcssa, %outer.latch ]
- store i64 %v8.lcssa.lcssa, i64* @b, align 4
- store i64 %v4.lcssa, i64* @a, align 4
+ store i64 %v8.lcssa.lcssa, ptr @b, align 4
+ store i64 %v4.lcssa, ptr @a, align 4
ret void
}
; CHECK-NEXT: %v8.lcssa.lcssa = phi i64 [ %[[IVNEXT]], %inner.body.split ]
; CHECK-NEXT: %v8.lcssa.lcssa.2 = phi i64 [ %[[IVNEXT]], %inner.body.split ]
; CHECK-NEXT: %r1 = add i64 %v8.lcssa.lcssa, %v8.lcssa.lcssa.2
-; CHECK-NEXT: store i64 %r1, i64* @b, align 4
-; CHECK-NEXT: store i64 %v4.lcssa, i64* @a, align 4
+; CHECK-NEXT: store i64 %r1, ptr @b, align 4
+; CHECK-NEXT: store i64 %v4.lcssa, ptr @a, align 4
entry:
inner.body: ; preds = %inner.body, %outer.header
%iv.inner = phi i64 [ 5, %outer.header ], [ %iv.inner.next, %inner.body ]
- %v7 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @c, i64 0, i64 %iv.inner, i64 %iv.outer
- store i32 0, i32* %v7, align 4
+ %v7 = getelementptr inbounds [10 x [10 x i32]], ptr @c, i64 0, i64 %iv.inner, i64 %iv.outer
+ store i32 0, ptr %v7, align 4
%iv.inner.next = add nsw i64 %iv.inner, -1
%v9 = icmp eq i64 %iv.inner, 0
br i1 %v9, label %outer.latch, label %inner.body
%v8.lcssa.lcssa = phi i64 [ %v8.lcssa, %outer.latch ]
%v8.lcssa.lcssa.2 = phi i64 [ %v8.lcssa, %outer.latch ]
%r1 = add i64 %v8.lcssa.lcssa, %v8.lcssa.lcssa.2
- store i64 %r1, i64* @b, align 4
- store i64 %v4.lcssa, i64* @a, align 4
+ store i64 %r1, ptr @b, align 4
+ store i64 %v4.lcssa, ptr @a, align 4
ret void
}
inner.body: ; preds = %inner.ph, %inner.body
%tmp31 = phi i32 [ 0, %inner.ph ], [ %tmp6, %inner.body]
- %tmp5 = load i32*, i32** undef, align 8
+ %tmp5 = load ptr, ptr undef, align 8
%tmp6 = add nsw i32 %tmp31, 1
br i1 undef, label %inner.body, label %outer.latch
; Function Attrs: norecurse nounwind
-define void @test(i32 %T, [90 x i32]* noalias nocapture %C, [90 x [90 x i16]]* noalias nocapture readonly %A, i16* noalias nocapture readonly %B) local_unnamed_addr #0 {
+define void @test(i32 %T, ptr noalias nocapture %C, ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B) local_unnamed_addr #0 {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR3_PREHEADER:%.*]]
; CHECK-NEXT: br label [[FOR1_HEADER_PREHEADER]]
; CHECK: for3.split1:
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[K]], [[MUL]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [90 x [90 x i16]], [90 x [90 x i16]]* [[A:%.*]], i32 [[ADD]], i32 [[I]], i32 [[J]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [90 x [90 x i16]], ptr [[A:%.*]], i32 [[ADD]], i32 [[I]], i32 [[J]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[ADD15:%.*]] = add nsw i16 [[TMP0]], 1
-; CHECK-NEXT: store i16 [[ADD15]], i16* [[ARRAYIDX]]
+; CHECK-NEXT: store i16 [[ADD15]], ptr [[ARRAYIDX]]
; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i32 [[K]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 90
; CHECK-NEXT: br label [[FOR2_INC16]]
for3: ; preds = %for3, %for2.header
%k = phi i32 [ 1, %for2.header ], [ %inc, %for3 ]
%add = add nsw i32 %k, %mul
- %arrayidx = getelementptr inbounds [90 x [90 x i16]], [90 x [90 x i16]]* %A, i32 %add, i32 %i, i32 %j
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds [90 x [90 x i16]], ptr %A, i32 %add, i32 %i, i32 %j
+ %0 = load i16, ptr %arrayidx, align 2
%add15 = add nsw i16 %0, 1
- store i16 %add15, i16* %arrayidx
+ store i16 %add15, ptr %arrayidx
%inc = add nuw nsw i32 %k, 1
%exitcond = icmp eq i32 %inc, 90
br i1 %exitcond, label %for2.inc16, label %for3
; CHECK: for.body:
; CHECK-NEXT: [[INC41:%.*]] = phi i32 [ [[INC4:%.*]], [[FOR_INC3:%.*]] ], [ undef, [[FOR_BODY_PREHEADER:%.*]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[INC41]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* @b, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 [[IDXPROM]]
; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.body2.preheader:
; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_INC_SPLIT:%.*]] ], [ 1, [[FOR_BODY2_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.inc:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: store i32 undef, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: store i32 undef, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[LSR_IV]], 4
; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = add nuw nsw i32 [[LSR_IV]], 1
; CHECK-NEXT: br label [[FOR_COND1_FOR_END_CRIT_EDGE:%.*]]
for.inc: ; preds = %for.body2
%idxprom = sext i32 %inc41 to i64
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @b, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- store i32 undef, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 undef, ptr %arrayidx, align 4
%cmp = icmp slt i32 %lsr.iv, 4
%lsr.iv.next = add nuw nsw i32 %lsr.iv, 1
br i1 %cmp, label %for.body2, label %for.cond1.for.end_crit_edge
; CHECK: for.body:
; CHECK-NEXT: [[INC41:%.*]] = phi i32 [ [[INC4:%.*]], [[FOR_INC3:%.*]] ], [ undef, [[FOR_BODY_PREHEADER:%.*]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[INC41]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* @b, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 [[IDXPROM]]
; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.body2.preheader:
; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_INC_SPLIT:%.*]] ], [ 1, [[FOR_BODY2_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
; CHECK: for.inc:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[LSR_IV]], 4
; CHECK-NEXT: [[CMP_ZEXT:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: store i32 [[CMP_ZEXT]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: store i32 [[CMP_ZEXT]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = add nuw nsw i32 [[LSR_IV]], 1
; CHECK-NEXT: br label [[FOR_COND1_FOR_END_CRIT_EDGE:%.*]]
; CHECK: for.inc.split:
for.inc: ; preds = %for.body2
%idxprom = sext i32 %inc41 to i64
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @b, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%cmp = icmp slt i32 %lsr.iv, 4
%cmp.zext = zext i1 %cmp to i32
- store i32 %cmp.zext, i32* %arrayidx, align 4
+ store i32 %cmp.zext, ptr %arrayidx, align 4
%lsr.iv.next = add nuw nsw i32 %lsr.iv, 1
br i1 %cmp, label %for.body2, label %for.cond1.for.end_crit_edge
; REMARKS-NEXT: Name: Interchanged
; REMARKS-NEXT: Function: pr43326-triply-nested
-define void @pr43326-triply-nested([10 x [10 x i32]]* %e, [10 x [10 x i32]]* %f) {
+define void @pr43326-triply-nested(ptr %e, ptr %f) {
entry:
br label %for.outermost.header
for.innermost: ; preds = %for.middle.header, %for.innermost
%indvars.innermost = phi i64 [ 0, %for.middle.header ], [ %indvars.innermost.next, %for.innermost ]
- %arrayidx12 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* %e, i64 %indvars.innermost, i64 %indvars.middle, i64 %indvars.outermost
- %0 = load i32, i32* %arrayidx12
- %arrayidx18 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* %f, i64 %indvars.innermost, i64 %indvars.middle, i64 %indvars.outermost
- store i32 %0, i32* %arrayidx18
+ %arrayidx12 = getelementptr inbounds [10 x [10 x i32]], ptr %e, i64 %indvars.innermost, i64 %indvars.middle, i64 %indvars.outermost
+ %0 = load i32, ptr %arrayidx12
+ %arrayidx18 = getelementptr inbounds [10 x [10 x i32]], ptr %f, i64 %indvars.innermost, i64 %indvars.middle, i64 %indvars.outermost
+ store i32 %0, ptr %arrayidx18
%indvars.innermost.next = add nuw nsw i64 %indvars.innermost, 1
%exitcond.innermost = icmp ne i64 %indvars.innermost.next, 10
br i1 %exitcond.innermost, label %for.innermost, label %for.middle.latch
define void @pr43326() {
entry:
- %0 = load i32, i32* @a
+ %0 = load i32, ptr @a
%tobool.not2 = icmp eq i32 %0, 0
br i1 %tobool.not2, label %for.end14, label %for.body.lr.ph
for.body.lr.ph: ; preds = %entry
- %d.promoted = load i32, i32* @d
- %a.promoted = load i32, i32* @a
+ %d.promoted = load i32, ptr @d
+ %a.promoted = load i32, ptr @a
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.inc12
%xor5 = phi i32 [ %xor.lcssa9, %for.body3 ], [ %xor, %for.inc ]
%inc4 = phi i32 [ 0, %for.body3 ], [ %inc, %for.inc ]
%idxprom = sext i32 %inc4 to i64
- %arrayidx9 = getelementptr inbounds [1 x [1 x i32]], [1 x [1 x i32]]* @e, i64 0, i64 %idxprom, i64 %idxprom8
- %1 = load i32, i32* %arrayidx9
+ %arrayidx9 = getelementptr inbounds [1 x [1 x i32]], ptr @e, i64 0, i64 %idxprom, i64 %idxprom8
+ %1 = load i32, ptr %arrayidx9
%xor = xor i32 %xor5, %1
br label %for.inc
%inc.lcssa.lcssa.lcssa = phi i32 [ %inc.lcssa.lcssa, %for.inc12 ]
%xor.lcssa.lcssa.lcssa = phi i32 [ %xor.lcssa.lcssa, %for.inc12 ]
%dec.lcssa.lcssa = phi i8 [ %dec.lcssa, %for.inc12 ]
- store i8 %dec.lcssa.lcssa, i8* @b
- store i32 %xor.lcssa.lcssa.lcssa, i32* @d
- store i32 %inc.lcssa.lcssa.lcssa, i32* @c
- store i32 %inc13.lcssa, i32* @a
+ store i8 %dec.lcssa.lcssa, ptr @b
+ store i32 %xor.lcssa.lcssa.lcssa, ptr @d
+ store i32 %inc.lcssa.lcssa.lcssa, ptr @c
+ store i32 %inc13.lcssa, ptr @a
br label %for.end14
for.end14: ; preds = %for.cond.for.end14_crit_edge, %entry
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ undef, [[ENTRY:%.*]] ], [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ]
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr inbounds double, double* undef, i64 [[OUTER_IV]]
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr inbounds double, ptr undef, i64 [[OUTER_IV]]
; CHECK-NEXT: br label [[INNER:%.*]]
; CHECK: inner:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], [[INNER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[IDX]], align 8
-; CHECK-NEXT: store double undef, double* [[IDX]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[IDX]], align 8
+; CHECK-NEXT: store double undef, ptr [[IDX]], align 8
; CHECK-NEXT: [[INNER_IV_NEXT]] = add nuw nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: br i1 false, label [[INNER]], label [[OUTER_LATCH]]
; CHECK: outer.latch:
outer.header: ; preds = %for.cond26.for.end44_crit_edge.us, %entry
%outer.iv = phi i64 [ undef, %entry ], [ %outer.iv.next, %outer.latch ]
- %idx = getelementptr inbounds double, double* undef, i64 %outer.iv
+ %idx = getelementptr inbounds double, ptr undef, i64 %outer.iv
br label %inner
inner: ; preds = %for.body28.us, %for.body25.us
%inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner ]
- %0 = load double, double* %idx, align 8
- store double undef, double* %idx, align 8
+ %0 = load double, ptr %idx, align 8
+ store double undef, ptr %idx, align 8
%inner.iv.next = add nuw nsw i64 %inner.iv, 1
br i1 undef, label %inner, label %outer.latch
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ undef, [[ENTRY:%.*]] ], [ [[OUTER_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ]
-; CHECK-NEXT: [[IDX:%.*]] = getelementptr inbounds double, double* undef, i64 [[OUTER_IV]]
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr inbounds double, ptr undef, i64 [[OUTER_IV]]
; CHECK-NEXT: br label [[INNER:%.*]]
; CHECK: inner:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], [[INNER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[IDX]], align 8
-; CHECK-NEXT: store double undef, double* [[IDX]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[IDX]], align 8
+; CHECK-NEXT: store double undef, ptr [[IDX]], align 8
; CHECK-NEXT: [[INNER_IV_NEXT]] = add nuw nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: br i1 false, label [[INNER]], label [[INNER_EXIT:%.*]]
; CHECK: inner.exit:
outer.header: ; preds = %for.cond26.for.end44_crit_edge.us, %entry
%outer.iv = phi i64 [ undef, %entry ], [ %outer.iv.next, %outer.latch ]
- %idx = getelementptr inbounds double, double* undef, i64 %outer.iv
+ %idx = getelementptr inbounds double, ptr undef, i64 %outer.iv
br label %inner
inner: ; preds = %for.body28.us, %for.body25.us
%inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner ]
- %0 = load double, double* %idx, align 8
- store double undef, double* %idx, align 8
+ %0 = load double, ptr %idx, align 8
+ store double undef, ptr %idx, align 8
%inner.iv.next = add nuw nsw i64 %inner.iv, 1
br i1 undef, label %inner, label %inner.exit
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: [[OUTER_IDX:%.*]] = phi i64 [ [[OUTER_IDX_INC:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, [[OUTER_HEADER_PREHEADER:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], [5 x [5 x double]]* @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]]
; CHECK-NEXT: br label [[INNER_HEADER_SPLIT:%.*]]
; CHECK: inner.header.preheader:
; CHECK-NEXT: br label [[INNER_HEADER:%.*]]
; CHECK-NEXT: [[INNER_IDX:%.*]] = phi i64 [ [[TMP3:%.*]], [[INNER_LATCH_SPLIT:%.*]] ], [ 0, [[INNER_HEADER_PREHEADER]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.header.split:
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[ARRAYIDX8]], align 8
-; CHECK-NEXT: store double undef, double* [[ARRAYIDX8]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX8]], align 8
+; CHECK-NEXT: store double undef, ptr [[ARRAYIDX8]], align 8
; CHECK-NEXT: br label [[INNER_LATCH:%.*]]
; CHECK: inner.latch:
; CHECK-NEXT: [[INNER_IDX_INC:%.*]] = add nsw i64 [[INNER_IDX]], 1
outer.header: ; preds = %for.inc27, %entry
%outer.idx = phi i64 [ 0, %entry ], [ %outer.idx.inc, %outer.latch ]
- %arrayidx8 = getelementptr inbounds [5 x [5 x double]], [5 x [5 x double]]* @wdtdr, i64 0, i64 0, i64 %outer.idx
+ %arrayidx8 = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 %outer.idx
br label %inner.header
inner.header: ; preds = %for.inc, %for.body
%inner.idx = phi i64 [ 0, %outer.header ], [ %inner.idx.inc, %inner.latch]
- %0 = load double, double* %arrayidx8, align 8
- store double undef, double* %arrayidx8, align 8
+ %0 = load double, ptr %arrayidx8, align 8
+ store double undef, ptr %arrayidx8, align 8
br label %inner.latch
inner.latch: ; preds = %for.body6
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: [[OUTER_IDX:%.*]] = phi i64 [ [[OUTER_IDX_INC:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, [[OUTER_HEADER_PREHEADER:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], [5 x [5 x double]]* @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]]
; CHECK-NEXT: br label [[INNER_HEADER_SPLIT:%.*]]
; CHECK: inner.header.preheader:
; CHECK-NEXT: br label [[INNER_HEADER:%.*]]
; CHECK-NEXT: [[INNER_IDX:%.*]] = phi i64 [ [[TMP3:%.*]], [[INNER_LATCH_SPLIT:%.*]] ], [ 0, [[INNER_HEADER_PREHEADER]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.header.split:
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[ARRAYIDX8]], align 8
-; CHECK-NEXT: store double undef, double* [[ARRAYIDX8]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX8]], align 8
+; CHECK-NEXT: store double undef, ptr [[ARRAYIDX8]], align 8
; CHECK-NEXT: br label [[INNER_LATCH:%.*]]
; CHECK: inner.latch:
; CHECK-NEXT: [[INNER_IDX_INC:%.*]] = add nsw i64 [[INNER_IDX]], 1
outer.header: ; preds = %for.inc27, %entry
%outer.idx = phi i64 [ 0, %entry ], [ %outer.idx.inc, %outer.latch ]
- %arrayidx8 = getelementptr inbounds [5 x [5 x double]], [5 x [5 x double]]* @wdtdr, i64 0, i64 0, i64 %outer.idx
+ %arrayidx8 = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 %outer.idx
br label %inner.header
inner.header: ; preds = %for.inc, %for.body
%inner.idx = phi i64 [ 0, %outer.header ], [ %inner.idx.inc, %inner.latch]
- %0 = load double, double* %arrayidx8, align 8
- store double undef, double* %arrayidx8, align 8
+ %0 = load double, ptr %arrayidx8, align 8
+ store double undef, ptr %arrayidx8, align 8
br label %inner.latch
inner.latch: ; preds = %for.body6
; CHECK-NEXT: [[OUTER_RED]] = phi i32 [ [[RED_NEXT_LCSSA:%.*]], [[INNER_SPLIT]] ], [ 0, [[INNER_PH]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.split1:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [400 x [400 x i32]], [400 x [400 x i32]]* @global, i64 0, i64 [[INNER_IV]], i64 [[TMP4]]
-; CHECK-NEXT: store i32 0, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [400 x [400 x i32]], ptr @global, i64 0, i64 [[INNER_IV]], i64 [[TMP4]]
+; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[RED_NEXT]] = or i32 [[INNER_RED]], 20
; CHECK-NEXT: [[INNER_IV_NEXT:%.*]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 400
inner: ; preds = %bb5, %bb3
%inner.iv = phi i64 [ 0, %inner.ph ], [ %inner.iv.next, %inner ]
%inner.red = phi i32 [ %outer.red, %inner.ph ], [ %red.next, %inner ]
- %ptr = getelementptr inbounds [400 x [400 x i32]], [400 x [400 x i32]]* @global, i64 0, i64 %inner.iv, i64 %tmp4
- store i32 0, i32* %ptr
+ %ptr = getelementptr inbounds [400 x [400 x i32]], ptr @global, i64 0, i64 %inner.iv, i64 %tmp4
+ store i32 0, ptr %ptr
%red.next = or i32 %inner.red, 20
%inner.iv.next = add nsw i64 %inner.iv, 1
%ec.1 = icmp eq i64 %inner.iv.next, 400
; CHECK: inner:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, [[INNER_PH]] ], [ [[INNER_IV_NEXT:%.*]], [[INNER]] ]
; CHECK-NEXT: [[INNER_RED:%.*]] = phi i32 [ [[OUTER_RED]], [[INNER_PH]] ], [ [[RED_NEXT:%.*]], [[INNER]] ]
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [400 x [400 x i32]], [400 x [400 x i32]]* @global, i64 0, i64 [[INNER_IV]], i64 [[TMP4]]
-; CHECK-NEXT: store i32 0, i32* [[PTR]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [400 x [400 x i32]], ptr @global, i64 0, i64 [[INNER_IV]], i64 [[TMP4]]
+; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[RED_NEXT]] = or i32 [[INNER_RED]], 20
; CHECK-NEXT: [[INNER_IV_NEXT]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 400
inner: ; preds = %bb5, %bb3
%inner.iv = phi i64 [ 0, %inner.ph ], [ %inner.iv.next, %inner ]
%inner.red = phi i32 [ %outer.red, %inner.ph ], [ %red.next, %inner ]
- %ptr = getelementptr inbounds [400 x [400 x i32]], [400 x [400 x i32]]* @global, i64 0, i64 %inner.iv, i64 %tmp4
- store i32 0, i32* %ptr
+ %ptr = getelementptr inbounds [400 x [400 x i32]], ptr @global, i64 0, i64 %inner.iv, i64 %tmp4
+ store i32 0, ptr %ptr
%red.next = or i32 %inner.red, 20
%inner.iv.next = add nsw i64 %inner.iv, 1
%ec.1 = icmp eq i64 %inner.iv.next, 400
; REMARKS-NEXT: Name: Interchanged
; REMARKS-NEXT: Function: pr48212
-define void @pr48212([5 x i32]* %filter) {
+define void @pr48212(ptr %filter) {
entry:
br label %L1
%temp.12 = phi i32 [ %temp.04, %L2 ], [ %add, %for.inc ]
%k2.01 = phi i32 [ 0, %L2 ], [ %inc, %for.inc ]
%idxprom = sext i32 %k2.01 to i64
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* %filter, i64 %idxprom
+ %arrayidx = getelementptr inbounds [5 x i32], ptr %filter, i64 %idxprom
%idxprom4 = sext i32 %k1.03 to i64
- %arrayidx5 = getelementptr inbounds [5 x i32], [5 x i32]* %arrayidx, i64 0, i64 %idxprom4
- %0 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [5 x i32], ptr %arrayidx, i64 0, i64 %idxprom4
+ %0 = load i32, ptr %arrayidx5
%add = add nsw i32 %temp.12, %0
br label %for.inc
for2:
%j = phi i64 [ %i.next, %for2 ], [ 1, %for2.preheader ]
%j.prev = add nsw i64 %j, -1
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j.prev, i64 %i30
- %lv1 = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %j, i64 %i30
- %lv2 = load i32, i32* %arrayidx9
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j.prev, i64 %i30
+ %lv1 = load i32, ptr %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], ptr @B, i64 0, i64 %j, i64 %i30
+ %lv2 = load i32, ptr %arrayidx9
%add = add nsw i32 %lv1, %lv2
- %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j, i64 %i30
- store i32 %add, i32* %arrayidx13
+ %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j, i64 %i30
+ store i32 %add, ptr %arrayidx13
%i.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc14, label %for2
for2:
%j = phi i64 [ 1, %for1.header ], [ %i.next, %for2 ]
%j.prev = add nsw i64 %j, -1
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %j.prev, i64 %i.prev
- %lv1 = load i32, i32* %arrayidx6
- %arrayidx12 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %j.prev, i64 %i.prev
- %lv2 = load i32, i32* %arrayidx12
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %j.prev, i64 %i.prev
+ %lv1 = load i32, ptr %arrayidx6
+ %arrayidx12 = getelementptr inbounds [100 x [100 x i32]], ptr @B, i64 0, i64 %j.prev, i64 %i.prev
+ %lv2 = load i32, ptr %arrayidx12
%add = add nsw i32 %lv1, %lv2
- store i32 %add, i32* %arrayidx6
+ store i32 %add, ptr %arrayidx6
%i.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc19, label %for2
for2:
%j = phi i64 [ 1, %for1.header ], [ %i.next, %for2 ]
%j.prev = add nsw i64 %j, -1
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %i.prev, i64 %j.prev
- %lv1 = load i32, i32* %arrayidx6
- %arrayidx10 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %i34, i64 %j
- %lv2 = load i32, i32* %arrayidx10
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %i.prev, i64 %j.prev
+ %lv1 = load i32, ptr %arrayidx6
+ %arrayidx10 = getelementptr inbounds [100 x [100 x i32]], ptr @B, i64 0, i64 %i34, i64 %j
+ %lv2 = load i32, ptr %arrayidx10
%add = add nsw i32 %lv1, %lv2
- store i32 %add, i32* %arrayidx6
+ store i32 %add, ptr %arrayidx6
%i.next = add nuw nsw i64 %j, 1
%exitcond = icmp eq i64 %j, 99
br i1 %exitcond, label %for1.inc17, label %for2
for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], ptr @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx5
%add = add nsw i32 %0, %k
- store i32 %add, i32* %arrayidx5
+ store i32 %add, ptr %arrayidx5
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 100
br i1 %exitcond, label %for.inc10, label %for.body3
; REMARKS-NEXT: Name: Interchanged
; REMARKS-NEXT: Function: test1
-define i64 @test1([100 x [100 x i64]]* %Arr) {
+define i64 @test1(ptr %Arr) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR2_PREHEADER:%.*]]
; CHECK-NEXT: [[SUM_OUTER]] = phi i64 [ [[SUM_INC_LCSSA:%.*]], [[FOR2_SPLIT]] ], [ 0, [[FOR2_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR1_HEADER_PREHEADER]]
; CHECK: for2.split1:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* [[ARR:%.*]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV23]]
-; CHECK-NEXT: [[LV:%.*]] = load i64, i64* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x [100 x i64]], ptr [[ARR:%.*]], i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV23]]
+; CHECK-NEXT: [[LV:%.*]] = load i64, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SUM_INC]] = add i64 [[SUM_INNER]], [[LV]]
; CHECK-NEXT: [[IV_ORIGINAL:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXIT1_ORIGINAL:%.*]] = icmp eq i64 [[IV_ORIGINAL]], 100
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
%sum.inner = phi i64 [ %sum.outer, %for1.header ], [ %sum.inc, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%sum.inc = add i64 %sum.inner, %lv
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
; REMARKS-NEXT: Name: UnsupportedPHIOuter
; REMARKS-NEXT: Function: test2
-define i64 @test2([100 x [100 x i64]]* %Arr) {
+define i64 @test2(ptr %Arr) {
entry:
br label %for1.header
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
%inner = phi i64 [ %indvars.iv23, %for1.header ], [ %sum.inc, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%sum.inc = add i64 %inner, %lv
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
; REMARKS-NEXT: Name: UnsupportedPHIOuter
; REMARKS-NEXT: Function: test3
-define i64 @test3([100 x [100 x i64]]* %Arr) {
+define i64 @test3(ptr %Arr) {
entry:
br label %for1.header
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
%sum.inner = phi i64 [ %so, %for1.header ], [ %sum.inc, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%sum.inc = add i64 %sum.inner, %lv
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
; REMARKS-NEXT: Name: UnsupportedPHIOuter
; REMARKS-NEXT: Function: test4
-define i64 @test4([100 x [100 x i64]]* %Arr, i64* %dst) {
+define i64 @test4(ptr %Arr, ptr %dst) {
entry:
- %gep.dst = getelementptr inbounds i64, i64* %dst, i64 42
+ %gep.dst = getelementptr inbounds i64, ptr %dst, i64 42
br label %for1.header
for1.header: ; preds = %for1.inc, %entry
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
%sum.inner = phi i64 [ %sum.outer, %for1.header ], [ %sum.inc, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%sum.inc = add i64 %sum.inner, %lv
- store i64 %sum.inc, i64* %gep.dst, align 4
+ store i64 %sum.inc, ptr %gep.dst, align 4
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
br i1 %exit1, label %for1.inc, label %for2
; REMARKS-NEXT: Name: UnsupportedPHIOuter
; REMARKS-NEXT: Function: test_constant_inner_loop_res
-define i64 @test_constant_inner_loop_res([100 x [100 x i64]]* %Arr) {
+define i64 @test_constant_inner_loop_res(ptr %Arr) {
entry:
br label %for1.header
for2: ; preds = %for2, %for1.header
%indvars.iv = phi i64 [ 0, %for1.header ], [ %indvars.iv.next.3, %for2 ]
%sum.inner = phi i64 [ %sum.outer, %for1.header ], [ %sum.inc, %for2 ]
- %arrayidx = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %lv = load i64, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i64]], ptr %Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %lv = load i64, ptr %arrayidx, align 4
%sum.inc = add i64 %sum.inner, %lv
%indvars.iv.next.3 = add nuw nsw i64 %indvars.iv, 1
%exit1 = icmp eq i64 %indvars.iv.next.3, 100
; REMARKS-NEXT: Name: Interchanged
; REMARKS-NEXT: Function: test5
-define float @test5([100 x [100 x float]]* %Arr, [100 x [100 x float]]* %Arr2) {
+define float @test5(ptr %Arr, ptr %Arr2) {
entry:
br label %outer.header
for.body3: ; preds = %for.body3, %outer.header
%float.inner = phi float [ %float.outer , %outer.header ], [ %float.inner.inc.inc, %for.body3 ]
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x float]], [100 x [100 x float]]* %Arr, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load float, float* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x float]], ptr %Arr, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load float, ptr %arrayidx5
%float.inner.inc = fadd fast float %float.inner, %vA
- %arrayidx6 = getelementptr inbounds [100 x [100 x float]], [100 x [100 x float]]* %Arr2, i64 0, i64 %iv.inner, i64 %iv.outer
- %vB = load float, float* %arrayidx6
+ %arrayidx6 = getelementptr inbounds [100 x [100 x float]], ptr %Arr2, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vB = load float, ptr %arrayidx6
%float.inner.inc.inc = fadd fast float %float.inner.inc, %vB
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
; REMARKS-NEXT: Name: UnsupportedPHIOuter
; REMARKS-NEXT: Function: test6
-define float @test6([100 x [100 x float]]* %Arr, [100 x [100 x float]]* %Arr2) {
+define float @test6(ptr %Arr, ptr %Arr2) {
entry:
br label %outer.header
for.body3: ; preds = %for.body3, %outer.header
%float.inner = phi float [ %float.outer , %outer.header ], [ %float.inner.inc.inc, %for.body3 ]
%iv.inner = phi i64 [ %iv.inner.next, %for.body3 ], [ 1, %outer.header ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x float]], [100 x [100 x float]]* %Arr, i64 0, i64 %iv.inner, i64 %iv.outer
- %vA = load float, float* %arrayidx5
+ %arrayidx5 = getelementptr inbounds [100 x [100 x float]], ptr %Arr, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vA = load float, ptr %arrayidx5
%float.inner.inc = fadd float %float.inner, %vA ; do not allow reassociation
- %arrayidx6 = getelementptr inbounds [100 x [100 x float]], [100 x [100 x float]]* %Arr2, i64 0, i64 %iv.inner, i64 %iv.outer
- %vB = load float, float* %arrayidx6
+ %arrayidx6 = getelementptr inbounds [100 x [100 x float]], ptr %Arr2, i64 0, i64 %iv.inner, i64 %iv.outer
+ %vB = load float, ptr %arrayidx6
%float.inner.inc.inc = fadd fast float %float.inner.inc, %vB
%iv.inner.next = add nuw nsw i64 %iv.inner, 1
%exitcond = icmp eq i64 %iv.inner.next, 100
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ [[TMP0:%.*]], [[INNER_HEADER_SPLIT:%.*]] ], [ 5, [[INNER_HEADER_PREHEADER]] ]
; CHECK-NEXT: br label [[BB1]]
; CHECK: inner.header.split1:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[PTR]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [1000 x [1000 x i32]], ptr @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[PTR]]
; CHECK-NEXT: [[V:%.*]] = mul i32 [[LV]], 100
-; CHECK-NEXT: store i32 [[V]], i32* [[PTR]]
+; CHECK-NEXT: store i32 [[V]], ptr [[PTR]]
; CHECK-NEXT: [[INNER_IV_NEXT:%.*]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[COND1:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 1000
; CHECK-NEXT: br label [[OUTER_LATCH]]
inner.header: ; preds = %inner.header, %outer.header
%inner.iv = phi i64 [ %inner.iv.next, %inner.header ], [ 5, %outer.header ]
- %ptr = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @global, i64 0, i64 %inner.iv, i64 %outer.iv
- %lv = load i32, i32* %ptr
+ %ptr = getelementptr inbounds [1000 x [1000 x i32]], ptr @global, i64 0, i64 %inner.iv, i64 %outer.iv
+ %lv = load i32, ptr %ptr
%v = mul i32 %lv, 100
- store i32 %v, i32* %ptr
+ store i32 %v, ptr %ptr
%inner.iv.next = add nsw i64 %inner.iv, 1
%cond1 = icmp eq i64 %inner.iv.next , 1000
br i1 %cond1, label %outer.latch, label %inner.header
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ [[TMP0:%.*]], [[INNER_HEADER_SPLIT:%.*]] ], [ 5, [[INNER_HEADER_PREHEADER]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.header.split1:
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, i32* [[PTR]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [1000 x [1000 x i32]], ptr @global, i64 0, i64 [[INNER_IV]], i64 [[OUTER_IV]]
+; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[PTR]]
; CHECK-NEXT: [[V:%.*]] = mul i32 [[LV]], 100
-; CHECK-NEXT: store i32 [[V]], i32* [[PTR]]
+; CHECK-NEXT: store i32 [[V]], ptr [[PTR]]
; CHECK-NEXT: [[INNER_IV_NEXT:%.*]] = add nsw i64 [[INNER_IV]], 1
; CHECK-NEXT: [[COND1:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 1000
; CHECK-NEXT: br label [[OUTER_LATCH]]
inner.header: ; preds = %inner.header, %outer.header
%inner.iv = phi i64 [ %inner.iv.next, %inner.header ], [ 5, %outer.header ], [ 5, %outer.header ]
- %ptr = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @global, i64 0, i64 %inner.iv, i64 %outer.iv
- %lv = load i32, i32* %ptr
+ %ptr = getelementptr inbounds [1000 x [1000 x i32]], ptr @global, i64 0, i64 %inner.iv, i64 %outer.iv
+ %lv = load i32, ptr %ptr
%v = mul i32 %lv, 100
- store i32 %v, i32* %ptr
+ store i32 %v, ptr %ptr
%inner.iv.next = add nsw i64 %inner.iv, 1
%cond1 = icmp eq i64 %inner.iv.next , 1000
br i1 %cond1, label %outer.latch, label %inner.header
; The test contains a GEP with an operand that is not SCEV-able. Make sure
; loop-interchange does not crash.
-define void @test([256 x float]* noalias %src, float* %dst) {
+define void @test(ptr noalias %src, ptr %dst) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[INNER_PREHEADER:%.*]]
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP0:%.*]], [[INNER_SPLIT:%.*]] ], [ 0, [[INNER_PREHEADER]] ]
; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]]
; CHECK: inner.split1:
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds [256 x float], [256 x float]* [[SRC:%.*]], <2 x i64> <i64 0, i64 1>, i64 [[J]]
-; CHECK-NEXT: [[SRC_0:%.*]] = extractelement <2 x float*> [[SRC_GEP]], i32 0
-; CHECK-NEXT: [[LV_0:%.*]] = load float, float* [[SRC_0]], align 4
+; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds [256 x float], ptr [[SRC:%.*]], <2 x i64> <i64 0, i64 1>, i64 [[J]]
+; CHECK-NEXT: [[SRC_0:%.*]] = extractelement <2 x ptr> [[SRC_GEP]], i32 0
+; CHECK-NEXT: [[LV_0:%.*]] = load float, ptr [[SRC_0]], align 4
; CHECK-NEXT: [[ADD_0:%.*]] = fadd float [[LV_0]], 1.000000e+00
-; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 [[J]]
-; CHECK-NEXT: store float [[ADD_0]], float* [[DST_GEP]], align 4
+; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 [[J]]
+; CHECK-NEXT: store float [[ADD_0]], ptr [[DST_GEP]], align 4
; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1
; CHECK-NEXT: [[INNER_EXITCOND:%.*]] = icmp eq i64 [[J_NEXT]], 100
; CHECK-NEXT: br label [[OUTER_LATCH]]
inner:
%j = phi i64 [ 0, %outer.header ], [ %j.next, %inner ]
- %src.gep = getelementptr inbounds [256 x float], [256 x float]* %src, <2 x i64> <i64 0, i64 1>, i64 %j
- %src.0 = extractelement <2 x float*> %src.gep, i32 0
- %lv.0 = load float, float* %src.0
+ %src.gep = getelementptr inbounds [256 x float], ptr %src, <2 x i64> <i64 0, i64 1>, i64 %j
+ %src.0 = extractelement <2 x ptr> %src.gep, i32 0
+ %lv.0 = load float, ptr %src.0
%add.0 = fadd float %lv.0, 1.0
- %dst.gep = getelementptr inbounds float, float* %dst, i64 %j
- store float %add.0, float* %dst.gep
+ %dst.gep = getelementptr inbounds float, ptr %dst, i64 %j
+ store float %add.0, ptr %dst.gep
%j.next = add nuw nsw i64 %j, 1
%inner.exitcond = icmp eq i64 %j.next, 100
br i1 %inner.exitcond, label %outer.latch, label %inner
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N) {
+define void @f(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N) {
entry:
-; CHECK: %load_initial = load i32, i32* %A
+; CHECK: %load_initial = load i32, ptr %A
br label %for.body
for.body: ; preds = %for.body, %entry
; CHECK: %store_forwarded = phi i32 [ %load_initial, %entry ], [ %add, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %load = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %load_1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %load = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %load_1 = load i32, ptr %arrayidx2, align 4
; CHECK: %add = add i32 %load_1, %store_forwarded
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
-define void @f(i32* %array, i32 %n) {
+define void @f(ptr %array, i32 %n) {
entry:
%cmp10 = icmp sgt i32 %n, 0
br i1 %cmp10, label %for.body, label %for.cond.cleanup
cond.true: ; preds = %for.body
%0 = add nsw i64 %indvars.iv, -1
- %arrayidx = getelementptr inbounds i32, i32* %array, i64 %0
- %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %array, i64 %0
+ %1 = load i32, ptr %arrayidx, align 4
br label %cond.end
cond.end: ; preds = %for.body, %cond.true
%cond = phi i32 [ %1, %cond.true ], [ 0, %for.body ]
; CHECK: %cond = phi i32 [ %1, %cond.true ], [ 0, %for.body ]
%add = add nsw i32 %cond, 4
- %arrayidx3 = getelementptr inbounds i32, i32* %array, i64 %indvars.iv
- store i32 %add, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %array, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx3, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
; CHECK-LABEL: @f_convergent(
; CHECK: call i32 @llvm.convergent(
; CHECK-NOT: call i32 @llvm.convergent(
-define void @f_convergent(i32* %A, i32* %B, i32* %C, i64 %N) #0 {
+define void @f_convergent(ptr %A, ptr %B, ptr %C, i64 %N) #0 {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%convergent.b = call i32 @llvm.convergent(i32 %b)
%a_p1 = add i32 %convergent.b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 1
+ %a = load i32, ptr %Aidx, align 1
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N) {
+define void @f(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
; CHECK-NOT: %store_forwarded
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- store i32 1, i32* %arrayidx, align 4
- %a = load i32, i32* %arrayidx, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %b = load i32, i32* %arrayidxB, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ store i32 1, ptr %arrayidx, align 4
+ %a = load i32, ptr %arrayidx, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %b = load i32, ptr %arrayidxB, align 4
; CHECK: %add = add i32 %b, %a
%add = add i32 %b, %a
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %A, i32* %B, i32* %C, i64 %N) {
+define void @f(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK: for.body.lver.check:
; CHECK: %found.conflict{{.*}} =
entry:
; Make sure the hoisted load keeps the alignment
-; CHECK: %load_initial = load i32, i32* %A, align 1
+; CHECK: %load_initial = load i32, ptr %A, align 1
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 1
+ %a = load i32, ptr %Aidx, align 1
; CHECK: %c = mul i32 %store_forwarded, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; CHECK: for.cond3:
; CHECK-NEXT: [[H_0:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[COND_END_FOR_COND_CLEANUP_LOOPEXIT_CRIT_EDGE:%.*]] ]
; CHECK-NEXT: [[IDXPROM11:%.*]] = sext i8 [[H_0]] to i64
-; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* @a, i64 0, i64 [[IDXPROM11]]
+; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [1 x i32], ptr @a, i64 0, i64 [[IDXPROM11]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: cond.end.for.cond.cleanup.loopexit_crit_edge:
; CHECK-NEXT: [[ADD]] = add i8 [[H_0]], [[INC:%.*]]
; CHECK-NEXT: br label [[FOR_COND3]]
; CHECK: for.body:
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX27]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX27]], align 4
; CHECK-NEXT: br i1 true, label [[COND_END_FOR_COND_CLEANUP_LOOPEXIT_CRIT_EDGE]], label [[FOR_BODY]]
;
entry:
for.cond3: ; preds = %cond.end.for.cond.cleanup.loopexit_crit_edge, %entry
%h.0 = phi i8 [ 0, %entry ], [ %add, %cond.end.for.cond.cleanup.loopexit_crit_edge ]
%idxprom11 = sext i8 %h.0 to i64
- %arrayidx27 = getelementptr inbounds [1 x i32], [1 x i32]* @a, i64 0, i64 %idxprom11
+ %arrayidx27 = getelementptr inbounds [1 x i32], ptr @a, i64 0, i64 %idxprom11
br label %for.body
cond.end.for.cond.cleanup.loopexit_crit_edge: ; preds = %for.body
br label %for.cond3
for.body: ; preds = %for.body, %for.cond3
- store i32 0, i32* %arrayidx27, align 4
+ store i32 0, ptr %arrayidx27, align 4
br i1 true, label %cond.end.for.cond.cleanup.loopexit_crit_edge, label %for.body
}
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N, i1 %C) {
+define void @f(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N, i1 %C) {
entry:
br i1 %C, label %for.body, label %for.end
; CHECK: for.body.preheader:
-; CHECK-NEXT: %load_initial = load i32, i32* %A
+; CHECK-NEXT: %load_initial = load i32, ptr %A
; CHECK-NEXT: br label %for.body
; CHECK: for.body:
for.body:
; CHECK-NEXT: %store_forwarded = phi i32 [ %load_initial, %for.body.preheader ], [ %add, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %load = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %load_1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %load = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %load_1 = load i32, ptr %arrayidx2, align 4
; CHECK: %add = add i32 %load_1, %store_forwarded
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %A, i32* %B, i32* %C, i64 %N, i32* %D) {
+define void @f(ptr %A, ptr %B, ptr %C, i64 %N, ptr %D) {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %Didx = getelementptr inbounds i32, i32* %D, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %Didx = getelementptr inbounds i32, ptr %D, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
; CHECK: %c = mul i32 %a, 2
; AGGRESSIVE: %c = mul i32 %store_forwarded, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
- store i32 2, i32* %Didx, align 4
+ store i32 %c, ptr %Cidx, align 4
+ store i32 2, ptr %Didx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B,
- i32* noalias nocapture %C, i32* noalias nocapture readonly %D,
+define void @f(ptr noalias nocapture %A, ptr noalias nocapture readonly %B,
+ ptr noalias nocapture %C, ptr noalias nocapture readonly %D,
i64 %N) {
entry:
-; CHECK: %load_initial = load i32, i32* %A
+; CHECK: %load_initial = load i32, ptr %A
br label %for.body
for.body: ; preds = %for.body, %entry
; CHECK: %store_forwarded = phi i32 [ %load_initial, %entry ], [ %addD, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %loadA = load i32, ptr %arrayidxA, align 4
; CHECK: %addA = add i32 %store_forwarded, 1
%addA = add i32 %loadA, 1
- %arrayidxB = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- store i32 %addA, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ store i32 %addA, ptr %arrayidxB, align 4
- %arrayidxC = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %loadC = load i32, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %loadC = load i32, ptr %arrayidxC, align 4
%addC = add i32 %loadC, 2
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidxA_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %addC, i32* %arrayidxA_next, align 4
+ %arrayidxA_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %addC, ptr %arrayidxA_next, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %D, i64 %indvars.iv
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %D, i64 %indvars.iv
+ %loadD = load i32, ptr %arrayidxD, align 4
%addD = add i32 %loadD, 3
- store i32 %addD, i32* %arrayidxA_next, align 4
+ store i32 %addD, ptr %arrayidxA_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; Make sure it doesn't crash in new pass manager due to missing preheader.
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @test(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N, i1 %C) {
+define void @test(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N, i1 %C) {
entry:
br i1 %C, label %for.body, label %for.end
; CHECK: test
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %load = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %load_1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %load = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %load_1 = load i32, ptr %arrayidx2, align 4
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f([2 x i32]* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %A1idx = getelementptr inbounds [2 x i32], [2 x i32]* %A, i64 %indvars.iv, i32 1
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %A0idx = getelementptr inbounds [2 x i32], [2 x i32]* %A, i64 %indvars.iv, i32 0
+ %A1idx = getelementptr inbounds [2 x i32], ptr %A, i64 %indvars.iv, i32 1
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %A0idx = getelementptr inbounds [2 x i32], ptr %A, i64 %indvars.iv, i32 0
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %A1idx, align 4
+ store i32 %a_p1, ptr %A1idx, align 4
-; CHECK: %a = load i32, i32* %A0idx, align 4
- %a = load i32, i32* %A0idx, align 4
+; CHECK: %a = load i32, ptr %A0idx, align 4
+ %a = load i32, ptr %A0idx, align 4
; CHECK: %c = mul i32 %a, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-LABEL: @f(
-define void @f(i32* %A, i32* %B, i32* %C, i64 %N) optsize {
+define void @f(ptr %A, ptr %B, ptr %C, i64 %N) optsize {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
; CHECK: %c = mul i32 %a, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; with -Os.
; CHECK-LABEL: @g(
-define void @g(i32* noalias %A, i32* %B, i32* noalias %C, i64 %N) optsize {
+define void @g(ptr noalias %A, ptr %B, ptr noalias %C, i64 %N) optsize {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
; CHECK: %c = mul i32 %store_forwarded, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; PGSO-LABEL: @f_pgso(
; NPGSO-LABEL: @f_pgso(
-define void @f_pgso(i32* %A, i32* %B, i32* %C, i64 %N) !prof !14 {
+define void @f_pgso(ptr %A, ptr %B, ptr %C, i64 %N) !prof !14 {
entry:
br label %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
; PGSO: %c = mul i32 %a, 2
; NPGSO-NOT: %c = mul i32 %a, 2
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
%tmp19 = phi i64 [ 0, %bb12 ], [ %tmp27, %bb18 ]
%tmp20 = add i64 %tmp19, 3
%tmp21 = add i64 %tmp19, 5
- %tmp22 = getelementptr inbounds i32, i32 addrspace(1)* undef, i64 %tmp20
- %tmp23 = bitcast i32 addrspace(1)* %tmp22 to <2 x i32> addrspace(1)*
- %tmp24 = load <2 x i32>, <2 x i32> addrspace(1)* %tmp23, align 4
- %tmp25 = getelementptr inbounds i32, i32 addrspace(1)* undef, i64 %tmp21
- %tmp26 = bitcast i32 addrspace(1)* %tmp25 to <2 x i32> addrspace(1)*
- store <2 x i32> undef, <2 x i32> addrspace(1)* %tmp26, align 4
+ %tmp22 = getelementptr inbounds i32, ptr addrspace(1) undef, i64 %tmp20
+ %tmp24 = load <2 x i32>, ptr addrspace(1) %tmp22, align 4
+ %tmp25 = getelementptr inbounds i32, ptr addrspace(1) undef, i64 %tmp21
+ store <2 x i32> undef, ptr addrspace(1) %tmp25, align 4
%tmp27 = add i64 %tmp19, 2
%tmp28 = icmp eq i64 %tmp27, %tmp17
br i1 %tmp28, label %bb29, label %bb18
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
target triple = "amdgcn-amd-amdhsa"
-%struct.foo = type { %struct.pluto, i8, i8*, i32 }
-%struct.pluto = type { i32, i32, i32, %struct.wombat*, i32, i32, i32 }
+%struct.foo = type { %struct.pluto, i8, ptr, i32 }
+%struct.pluto = type { i32, i32, i32, ptr, i32, i32, i32 }
%struct.wombat = type { %struct.barney }
%struct.barney = type { <2 x float> }
define protected amdgpu_kernel void @widget(i32 %arg, i32 %arg1) #0 {
; CHECK-LABEL: @widget(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [4000 x float], [4000 x float] addrspace(3)* @global.1, i32 0, i32 [[ARG:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = load %struct.wombat*, %struct.wombat* addrspace(4)* getelementptr inbounds (%struct.foo, [[STRUCT_FOO:%.*]] addrspace(4)* @global, i64 0, i32 0, i32 3), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_WOMBAT:%.*]], %struct.wombat* [[TMP2]], i64 undef, i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.barney* [[TMP3]] to i64*
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [4000 x float], ptr addrspace(3) @global.1, i32 0, i32 [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr addrspace(4) getelementptr inbounds (%struct.foo, ptr addrspace(4) @global, i64 0, i32 0, i32 3), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_WOMBAT:%.*]], ptr [[TMP2]], i64 undef, i32 0
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb5.loopexit:
; CHECK-NEXT: br label [[BB5]]
; CHECK-NEXT: [[TMP8:%.*]] = mul nsw i32 [[TMP7]], undef
; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP8]], undef
; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_WOMBAT]], %struct.wombat* [[TMP2]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast %struct.wombat* [[TMP11]] to i64*
-; CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_WOMBAT]], ptr [[TMP2]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP11]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = srem i32 1, [[ARG1:%.*]]
; CHECK-NEXT: [[TMP15:%.*]] = add nuw nsw i32 [[TMP14]], 1
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4000 x float], [4000 x float] addrspace(3)* @global.1, i32 0, i32 [[TMP15]]
-; CHECK-NEXT: [[TMP17:%.*]] = load float, float addrspace(3)* [[TMP16]], align 4
-; CHECK-NEXT: [[TMP18:%.*]] = load float, float addrspace(3)* [[TMP]], align 4
-; CHECK-NEXT: store i64 [[TMP13]], i64* [[TMP4]], align 8
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4000 x float], ptr addrspace(3) @global.1, i32 0, i32 [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr addrspace(3) [[TMP16]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr addrspace(3) [[TMP]], align 4
+; CHECK-NEXT: store i64 [[TMP13]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[TMP19]] = add nsw i32 [[TMP7]], 1
; CHECK-NEXT: [[TMP20:%.*]] = icmp slt i32 [[TMP7]], 3
; CHECK-NEXT: br i1 [[TMP20]], label [[BB6]], label [[BB5_LOOPEXIT:%.*]]
;
bb:
- %tmp = getelementptr inbounds [4000 x float], [4000 x float] addrspace(3)* @global.1, i32 0, i32 %arg
- %tmp2 = load %struct.wombat*, %struct.wombat* addrspace(4)* getelementptr inbounds (%struct.foo, %struct.foo addrspace(4)* @global, i64 0, i32 0, i32 3), align 8
- %tmp3 = getelementptr inbounds %struct.wombat, %struct.wombat* %tmp2, i64 undef, i32 0
- %tmp4 = bitcast %struct.barney* %tmp3 to i64*
+ %tmp = getelementptr inbounds [4000 x float], ptr addrspace(3) @global.1, i32 0, i32 %arg
+ %tmp2 = load ptr, ptr addrspace(4) getelementptr inbounds (%struct.foo, ptr addrspace(4) @global, i64 0, i32 0, i32 3), align 8
+ %tmp3 = getelementptr inbounds %struct.wombat, ptr %tmp2, i64 undef, i32 0
br label %bb5
bb5: ; preds = %bb6, %bb
%tmp8 = mul nsw i32 %tmp7, undef
%tmp9 = add i32 %tmp8, undef
%tmp10 = sext i32 %tmp9 to i64
- %tmp11 = getelementptr inbounds %struct.wombat, %struct.wombat* %tmp2, i64 %tmp10
- %tmp12 = bitcast %struct.wombat* %tmp11 to i64*
- %tmp13 = load i64, i64* %tmp12, align 8
+ %tmp11 = getelementptr inbounds %struct.wombat, ptr %tmp2, i64 %tmp10
+ %tmp13 = load i64, ptr %tmp11, align 8
%tmp14 = srem i32 1, %arg1
%tmp15 = add nuw nsw i32 %tmp14, 1
- %tmp16 = getelementptr inbounds [4000 x float], [4000 x float] addrspace(3)* @global.1, i32 0, i32 %tmp15
- %tmp17 = load float, float addrspace(3)* %tmp16, align 4
- %tmp18 = load float, float addrspace(3)* %tmp, align 4
- store i64 %tmp13, i64* %tmp4, align 8
+ %tmp16 = getelementptr inbounds [4000 x float], ptr addrspace(3) @global.1, i32 0, i32 %tmp15
+ %tmp17 = load float, ptr addrspace(3) %tmp16, align 4
+ %tmp18 = load float, ptr addrspace(3) %tmp, align 4
+ store i64 %tmp13, ptr %tmp3, align 8
%tmp19 = add nsw i32 %tmp7, 1
%tmp20 = icmp slt i32 %tmp7, 3
br i1 %tmp20, label %bb6, label %bb5
%tmp8 = phi i32 [ %tmp15, %bb7 ], [ %tmp3, %bb6 ]
%tmp9 = phi i32 [ %tmp8, %bb7 ], [ %tmp, %bb6 ]
%tmp10 = zext i32 %tmp9 to i64
- %tmp11 = getelementptr inbounds float, float addrspace(1)* null, i64 %tmp10
- %tmp12 = load float, float addrspace(1)* %tmp11, align 4
+ %tmp11 = getelementptr inbounds float, ptr addrspace(1) null, i64 %tmp10
+ %tmp12 = load float, ptr addrspace(1) %tmp11, align 4
%tmp13 = zext i32 %tmp8 to i64
- %tmp14 = getelementptr inbounds float, float addrspace(1)* null, i64 %tmp13
- store float 1.000000e+00, float addrspace(1)* %tmp14, align 4
+ %tmp14 = getelementptr inbounds float, ptr addrspace(1) null, i64 %tmp13
+ store float 1.000000e+00, ptr addrspace(1) %tmp14, align 4
%tmp15 = add nuw nsw i32 %tmp8, 1
%tmp16 = icmp sgt i32 %tmp8, 78
br i1 %tmp16, label %bb17, label %bb7
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N,
+define void @f(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N,
;
;
;
; DEFAULT: for.body.lver.orig:
; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX2_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], ptr [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; DEFAULT: for.body.ph:
-; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, ptr [[A]], align 4
; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
; DEFAULT: for.body:
; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
-; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; DEFAULT-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; DEFAULT-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end.loopexit:
; NO-VERSION: for.body:
; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE:%.*]]
-; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
-; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; NO-VERSION-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; NO-VERSION-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; NO-VERSION: for.end:
; THRESHOLD: for.body.lver.orig:
; THRESHOLD-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; THRESHOLD-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
-; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
-; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
-; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
-; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX2_LVER_ORIG]], align 4
; THRESHOLD-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
; THRESHOLD-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
-; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
-; THRESHOLD-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; THRESHOLD-NEXT: store i32 [[ADD_LVER_ORIG]], ptr [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
; THRESHOLD-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
; THRESHOLD-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; THRESHOLD: for.body.ph:
-; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load i32, ptr [[A]], align 4
; THRESHOLD-NEXT: br label [[FOR_BODY:%.*]]
; THRESHOLD: for.body:
; THRESHOLD-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
-; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
-; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
-; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; THRESHOLD-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; THRESHOLD-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; THRESHOLD-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; THRESHOLD: for.end.loopexit:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
- %load = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %load_1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %mul
+ %load = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %load_1 = load i32, ptr %arrayidx2, align 4
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
}
; Similar to @f(), but with a struct type.
-define void @f_struct({ i32, i8 } * noalias nocapture %A, { i32, i8 }* noalias nocapture readonly %B, i64 %N,
+define void @f_struct(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N,
;
;
;
; DEFAULT: for.body.lver.orig:
; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX2_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[V1_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_LVER_ORIG]], 0
; DEFAULT-NEXT: [[V2_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_1_LVER_ORIG]], 0
; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[V1_LVER_ORIG]], [[V2_LVER_ORIG]]
; DEFAULT-NEXT: [[INS_LVER_ORIG:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD_LVER_ORIG]], 0
; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
-; DEFAULT-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], { i32, i8 }* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; DEFAULT-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], ptr [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; DEFAULT: for.body.ph:
-; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, { i32, i8 }* [[A]], align 4
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, ptr [[A]], align 4
; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
; DEFAULT: for.body:
; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi { i32, i8 } [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[INS:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[MUL]]
-; DEFAULT-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX2]], align 4
; DEFAULT-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[STORE_FORWARDED]], 0
; DEFAULT-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
; DEFAULT-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
; DEFAULT-NEXT: [[INS]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
-; DEFAULT-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; DEFAULT-NEXT: store { i32, i8 } [[INS]], ptr [[ARRAYIDX_NEXT]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end.loopexit:
; NO-VERSION: for.body:
; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE:%.*]]
-; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL]]
-; NO-VERSION-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
-; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV]]
-; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX2]], align 4
; NO-VERSION-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[LOAD]], 0
; NO-VERSION-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
; NO-VERSION-NEXT: [[INS:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
-; NO-VERSION-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; NO-VERSION-NEXT: store { i32, i8 } [[INS]], ptr [[ARRAYIDX_NEXT]], align 4
; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; NO-VERSION: for.end:
; THRESHOLD: for.body.lver.orig:
; THRESHOLD-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; THRESHOLD-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
-; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
-; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX_LVER_ORIG]], align 4
-; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
-; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX2_LVER_ORIG]], align 4
; THRESHOLD-NEXT: [[V1_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_LVER_ORIG]], 0
; THRESHOLD-NEXT: [[V2_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_1_LVER_ORIG]], 0
; THRESHOLD-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[V1_LVER_ORIG]], [[V2_LVER_ORIG]]
; THRESHOLD-NEXT: [[INS_LVER_ORIG:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD_LVER_ORIG]], 0
; THRESHOLD-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
-; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
-; THRESHOLD-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], { i32, i8 }* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; THRESHOLD-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], ptr [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
; THRESHOLD-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
; THRESHOLD-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; THRESHOLD: for.body.ph:
-; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, { i32, i8 }* [[A]], align 4
+; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, ptr [[A]], align 4
; THRESHOLD-NEXT: br label [[FOR_BODY:%.*]]
; THRESHOLD: for.body:
; THRESHOLD-NEXT: [[STORE_FORWARDED:%.*]] = phi { i32, i8 } [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[INS:%.*]], [[FOR_BODY]] ]
; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
-; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[MUL]]
-; THRESHOLD-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
-; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B]], i64 [[INDVARS_IV]]
-; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[B]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, ptr [[ARRAYIDX2]], align 4
; THRESHOLD-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[STORE_FORWARDED]], 0
; THRESHOLD-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
; THRESHOLD-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
; THRESHOLD-NEXT: [[INS]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
-; THRESHOLD-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; THRESHOLD-NEXT: store { i32, i8 } [[INS]], ptr [[ARRAYIDX_NEXT]], align 4
; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
; THRESHOLD: for.end.loopexit:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride
- %arrayidx = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %mul
- %load = load { i32, i8 }, { i32, i8 }* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds { i32, i8 }, { i32, i8 }* %B, i64 %indvars.iv
- %load_1 = load { i32, i8 }, { i32, i8 }* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds { i32, i8 }, ptr %A, i64 %mul
+ %load = load { i32, i8 }, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds { i32, i8 }, ptr %B, i64 %indvars.iv
+ %load_1 = load { i32, i8 }, ptr %arrayidx2, align 4
%v1 = extractvalue { i32, i8 } %load, 0
%add = add i32 %v1, %v2
%ins = insertvalue { i32, i8 } undef, i32 %add, 0
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx_next = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %indvars.iv.next
- store { i32, i8 } %ins, { i32, i8 }* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds { i32, i8 }, ptr %A, i64 %indvars.iv.next
+ store { i32, i8 } %ins, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; for (unsigned i = 0; i < 100; i++)
; A[Stride2 * (i + 1)] = A[Stride1 * i] + B[i];
-define void @two_strides(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N,
+define void @two_strides(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %N,
;
;
;
; DEFAULT: for.body.lver.orig:
; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE_1]]
-; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
-; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDX2_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
; DEFAULT-NEXT: [[MUL_2_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[STRIDE_2]]
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2_LVER_ORIG]]
-; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_2_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], ptr [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; DEFAULT: for.body.ph:
-; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, ptr [[A]], align 4
; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
; DEFAULT: for.body:
; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
-; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; DEFAULT-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; DEFAULT-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2]]
-; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
-; DEFAULT-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_2]]
+; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
; DEFAULT: for.end.loopexit:
; NO-VERSION: for.body:
; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1:%.*]]
-; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
-; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; NO-VERSION-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2:%.*]]
-; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
-; NO-VERSION-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_2]]
+; NO-VERSION-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; NO-VERSION: for.end:
; THRESHOLD: for.body:
; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1:%.*]]
-; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
-; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; THRESHOLD-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; THRESHOLD-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2:%.*]]
-; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
-; THRESHOLD-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_2]]
+; THRESHOLD-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_NEXT]], align 4
; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; THRESHOLD: for.end:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride.1
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
- %load = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %load_1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %mul
+ %load = load i32, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %load_1 = load i32, ptr %arrayidx2, align 4
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%mul.2 = mul i64 %indvars.iv.next, %stride.2
- %arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %mul.2
- store i32 %add, i32* %arrayidx_next, align 4
+ %arrayidx_next = getelementptr inbounds i32, ptr %A, i64 %mul.2
+ store i32 %add, ptr %arrayidx_next, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-p64:64:64-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
-; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
+; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to float*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[BIDX]], align 4
; CHECK-NEXT: [[A_P1:%.*]] = add i32 [[B]], 2
; CHECK-NEXT: [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to float
-; CHECK-NEXT: store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
-; CHECK-NEXT: [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT: store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT: [[A:%.*]] = load float, ptr [[AIDX]], align 4
; CHECK-NEXT: [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
; CHECK-NEXT: [[C_INT:%.*]] = fptosi float [[C]] to i32
-; CHECK-NEXT: store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT: store i32 [[C_INT]], ptr [[CIDX]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %Aidx.float = bitcast i32* %Aidx to float*
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load float, float* %Aidx.float, align 4
+ %a = load float, ptr %Aidx, align 4
%c = fmul float %a, 2.0
%c.int = fptosi float %c to i32
- store i32 %c.int, i32* %Cidx, align 4
+ store i32 %c.int, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; C[i] = ((float*)A)[i] * 2;
; }
-define void @f2(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f2(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: @f2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
-; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
+; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to float*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[BIDX]], align 4
; CHECK-NEXT: [[A_P2:%.*]] = add i32 [[B]], 2
-; CHECK-NEXT: store i32 [[A_P2]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT: store i32 [[A_P2]], ptr [[AIDX_NEXT]], align 4
; CHECK-NEXT: [[A_P3:%.*]] = add i32 [[B]], 3
; CHECK-NEXT: [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P3]] to float
-; CHECK-NEXT: store i32 [[A_P3]], i32* [[AIDX_NEXT]], align 4
-; CHECK-NEXT: [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT: store i32 [[A_P3]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT: [[A:%.*]] = load float, ptr [[AIDX]], align 4
; CHECK-NEXT: [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
; CHECK-NEXT: [[C_INT:%.*]] = fptosi float [[C]] to i32
-; CHECK-NEXT: store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT: store i32 [[C_INT]], ptr [[CIDX]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %Aidx.float = bitcast i32* %Aidx to float*
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p2 = add i32 %b, 2
- store i32 %a_p2, i32* %Aidx_next, align 4
+ store i32 %a_p2, ptr %Aidx_next, align 4
%a_p3 = add i32 %b, 3
- store i32 %a_p3, i32* %Aidx_next, align 4
+ store i32 %a_p3, ptr %Aidx_next, align 4
- %a = load float, float* %Aidx.float, align 4
+ %a = load float, ptr %Aidx, align 4
%c = fmul float %a, 2.0
%c.int = fptosi float %c to i32
- store i32 %c.int, i32* %Cidx, align 4
+ store i32 %c.int, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; Check that we can forward between pointer-sized integers and actual
; pointers.
-define void @f3(i64* noalias %A, i64* noalias %B, i64* noalias %C, i64 %N) {
+define void @f3(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: @f3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A1:%.*]] = bitcast i64* [[A:%.*]] to i8**
-; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load i8*, i8** [[A1]], align 8
+; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi i8* [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi ptr [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i64, i64* [[C:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX_I8P:%.*]] = bitcast i64* [[AIDX]] to i8**
-; CHECK-NEXT: [[B:%.*]] = load i64, i64* [[BIDX]], align 8
+; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i64, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[B:%.*]] = load i64, ptr [[BIDX]], align 8
; CHECK-NEXT: [[A_P1:%.*]] = add i64 [[B]], 2
-; CHECK-NEXT: [[STORE_FORWARD_CAST]] = inttoptr i64 [[A_P1]] to i8*
-; CHECK-NEXT: store i64 [[A_P1]], i64* [[AIDX_NEXT]], align 8
-; CHECK-NEXT: [[A:%.*]] = load i8*, i8** [[AIDX_I8P]], align 8
-; CHECK-NEXT: [[C:%.*]] = getelementptr i8, i8* [[STORE_FORWARDED]], i64 57
-; CHECK-NEXT: [[C_I64P:%.*]] = ptrtoint i8* [[C]] to i64
-; CHECK-NEXT: store i64 [[C_I64P]], i64* [[CIDX]], align 8
+; CHECK-NEXT: [[STORE_FORWARD_CAST]] = inttoptr i64 [[A_P1]] to ptr
+; CHECK-NEXT: store i64 [[A_P1]], ptr [[AIDX_NEXT]], align 8
+; CHECK-NEXT: [[A:%.*]] = load ptr, ptr [[AIDX]], align 8
+; CHECK-NEXT: [[C:%.*]] = getelementptr i8, ptr [[STORE_FORWARDED]], i64 57
+; CHECK-NEXT: [[C_I64P:%.*]] = ptrtoint ptr [[C]] to i64
+; CHECK-NEXT: store i64 [[C_I64P]], ptr [[CIDX]], align 8
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i64, i64* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i64, i64* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i64, i64* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i64, i64* %A, i64 %indvars.iv
- %Aidx.i8p = bitcast i64* %Aidx to i8**
+ %Aidx_next = getelementptr inbounds i64, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i64, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i64, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i64, ptr %A, i64 %indvars.iv
- %b = load i64, i64* %Bidx, align 8
+ %b = load i64, ptr %Bidx, align 8
%a_p1 = add i64 %b, 2
- store i64 %a_p1, i64* %Aidx_next, align 8
+ store i64 %a_p1, ptr %Aidx_next, align 8
- %a = load i8*, i8** %Aidx.i8p, align 8
- %c = getelementptr i8, i8* %a, i64 57
- %c.i64p = ptrtoint i8* %c to i64
- store i64 %c.i64p, i64* %Cidx, align 8
+ %a = load ptr, ptr %Aidx, align 8
+ %c = getelementptr i8, ptr %a, i64 57
+ %c.i64p = ptrtoint ptr %c to i64
+ store i64 %c.i64p, ptr %Cidx, align 8
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; C[i] = ((float*)A)[i] * 2;
; }
-define void @f4(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f4(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: @f4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to <2 x half>*
-; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load <2 x half>, <2 x half>* [[A1]], align 4
+; CHECK-NEXT: [[LOAD_INITIAL:%.*]] = load <2 x half>, ptr [[A:%.*]], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[STORE_FORWARDED:%.*]] = phi <2 x half> [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to <2 x half>*
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[BIDX]], align 4
; CHECK-NEXT: [[A_P1:%.*]] = add i32 [[B]], 2
; CHECK-NEXT: [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to <2 x half>
-; CHECK-NEXT: store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
-; CHECK-NEXT: [[A:%.*]] = load <2 x half>, <2 x half>* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT: store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT: [[A:%.*]] = load <2 x half>, ptr [[AIDX]], align 4
; CHECK-NEXT: [[C:%.*]] = fmul <2 x half> [[STORE_FORWARDED]], <half 0xH4000, half 0xH4000>
; CHECK-NEXT: [[C_INT:%.*]] = bitcast <2 x half> [[C]] to i32
-; CHECK-NEXT: store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT: store i32 [[C_INT]], ptr [[CIDX]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %Aidx.float = bitcast i32* %Aidx to <2 x half>*
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load <2 x half>, <2 x half>* %Aidx.float, align 4
+ %a = load <2 x half>, ptr %Aidx, align 4
%c = fmul <2 x half> %a, <half 2.0, half 2.0>
%c.int = bitcast <2 x half> %c to i32
- store i32 %c.int, i32* %Cidx, align 4
+ store i32 %c.int, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; Check that we don't forward between integers and actual
; pointers if sizes don't match.
-define void @f5(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f5(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK-LABEL: @f5(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[AIDX_I8P:%.*]] = bitcast i32* [[AIDX]] to i8**
-; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT: [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[B:%.*]] = load i32, ptr [[BIDX]], align 4
; CHECK-NEXT: [[A_P1:%.*]] = add i32 [[B]], 2
-; CHECK-NEXT: store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
-; CHECK-NEXT: [[A:%.*]] = load i8*, i8** [[AIDX_I8P]], align 8
-; CHECK-NEXT: [[C:%.*]] = getelementptr i8, i8* [[A]], i32 57
-; CHECK-NEXT: [[C_I64P:%.*]] = ptrtoint i8* [[C]] to i32
-; CHECK-NEXT: store i32 [[C_I64P]], i32* [[CIDX]], align 4
+; CHECK-NEXT: store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT: [[A:%.*]] = load ptr, ptr [[AIDX]], align 8
+; CHECK-NEXT: [[C:%.*]] = getelementptr i8, ptr [[A]], i32 57
+; CHECK-NEXT: [[C_I64P:%.*]] = ptrtoint ptr [[C]] to i32
+; CHECK-NEXT: store i32 [[C_I64P]], ptr [[CIDX]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %Aidx.i8p = bitcast i32* %Aidx to i8**
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i8*, i8** %Aidx.i8p, align 8
- %c = getelementptr i8, i8* %a, i32 57
- %c.i64p = ptrtoint i8* %c to i32
- store i32 %c.i64p, i32* %Cidx, align 4
+ %a = load ptr, ptr %Aidx, align 8
+ %c = getelementptr i8, ptr %a, i32 57
+ %c.i64p = ptrtoint ptr %c to i32
+ store i32 %c.i64p, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; TODO
; Make sure loop-load-elimination triggers for a loop with uncomputable
; backedge-taken counts when no runtime checks are required.
-define void @load_elim_no_runtime_checks(i32* noalias %A, i32* noalias %B, i32* noalias %C, i32 %N) {
+define void @load_elim_no_runtime_checks(ptr noalias %A, ptr noalias %B, ptr noalias %C, i32 %N) {
; CHECK-LABEL: @load_elim_no_runtime_checks
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %for.body
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i32 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i32 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i32 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i32 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i32 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 1
+ %a = load i32, ptr %Aidx, align 1
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i32 %indvars.iv.next, %a
br i1 %exitcond, label %for.end, label %for.body
; Make sure loop-load-elimination triggers for a loop with uncomputable
; backedge-taken counts when no runtime checks are required.
-define void @load_elim_wrapping_runtime_checks(i32* noalias %A, i32* noalias %B, i32* noalias %C, i32 %N) {
+define void @load_elim_wrapping_runtime_checks(ptr noalias %A, ptr noalias %B, ptr noalias %C, i32 %N) {
; CHECK-LABEL: @load_elim_wrapping_runtime_checks
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %for.body
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add i32 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i32 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i32 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i32 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i32 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i32 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 1
+ %a = load i32, ptr %Aidx, align 1
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i32 %indvars.iv.next, %a
br i1 %exitcond, label %for.end, label %for.body
; Make sure we do not crash when dealing with uncomputable backedge-taken counts
; and a variable distance between accesses.
-define void @uncomputable_btc_crash(i8* %row, i32 %filter, i32* noalias %exits) local_unnamed_addr #0 {
+define void @uncomputable_btc_crash(ptr %row, i32 %filter, ptr noalias %exits) local_unnamed_addr #0 {
; CHECK-LABEL: @uncomputable_btc_crash
; CHECK-NEXT: entry:
; CHECK-NEXT: getelementptr
; CHECK-NEXT: br label %loop
;
entry:
- %add.ptr = getelementptr inbounds i8, i8* %row, i32 %filter
+ %add.ptr = getelementptr inbounds i8, ptr %row, i32 %filter
br label %loop
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %add.ptr.gep = getelementptr i8, i8* %add.ptr, i32 %iv
- %row.gep = getelementptr i8, i8* %row, i32 %iv
- %gep.1 = getelementptr i8, i8* %add.ptr.gep, i32 0
- %gep.2 = getelementptr i8, i8* %row.gep, i32 0
- %l = load i8, i8* %gep.2, align 1
- store i8 %l, i8* %gep.1, align 1
+ %add.ptr.gep = getelementptr i8, ptr %add.ptr, i32 %iv
+ %row.gep = getelementptr i8, ptr %row, i32 %iv
+ %l = load i8, ptr %row.gep, align 1
+ store i8 %l, ptr %add.ptr.gep, align 1
%iv.next = add i32 %iv, 8
- %exit.gep = getelementptr i32, i32* %exits, i32 %iv
- %lv = load i32, i32* %exit.gep
+ %exit.gep = getelementptr i32, ptr %exits, i32 %iv
+ %lv = load i32, ptr %exit.gep
%c = icmp eq i32 %lv, 120
br i1 %c, label %exit, label %loop
; D[i] = A[i] + 2;
; }
-define void @f(i32* noalias %A, i32* noalias %B, i32* noalias %C,
- i32* noalias %D, i64 %N) {
+define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C,
+ ptr noalias %D, i64 %N) {
entry:
; for.body.ph:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Didx = getelementptr inbounds i32, i32* %D, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Didx = getelementptr inbounds i32, ptr %D, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%indvars.m2 = mul nuw nsw i64 %indvars.iv, 2
- %A2idx = getelementptr inbounds i32, i32* %A, i64 %indvars.m2
+ %A2idx = getelementptr inbounds i32, ptr %A, i64 %indvars.m2
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_next, align 4
+ store i32 %a_p1, ptr %Aidx_next, align 4
- %c = load i32, i32* %Cidx, align 4
+ %c = load i32, ptr %Cidx, align 4
%a_m2 = add i32 %c, 2
- store i32 %a_m2, i32* %A2idx, align 4
+ store i32 %a_m2, ptr %A2idx, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
; CHECK-NOT: %d = add i32 %store_forwarded, 2
; CHECK: %d = add i32 %a, 2
%d = add i32 %a, 2
- store i32 %d, i32* %Didx, align 4
+ store i32 %d, ptr %Didx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
; }
; Function Attrs: norecurse nounwind uwtable
-define void @goo32(float %alpha, float* %a, float* readonly %b) #0 {
+define void @goo32(float %alpha, ptr %a, ptr readonly %b) #0 {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %b, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
%mul = fmul float %0, %alpha
- %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %a, i64 %indvars.iv
+ %1 = load float, ptr %arrayidx2, align 4
%add = fadd float %1, %mul
- store float %add, float* %arrayidx2, align 4
+ store float %add, ptr %arrayidx2, align 4
%2 = or i64 %indvars.iv, 1
- %arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
- %3 = load float, float* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds float, ptr %b, i64 %2
+ %3 = load float, ptr %arrayidx5, align 4
%mul6 = fmul float %3, %alpha
- %arrayidx9 = getelementptr inbounds float, float* %a, i64 %2
- %4 = load float, float* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds float, ptr %a, i64 %2
+ %4 = load float, ptr %arrayidx9, align 4
%add10 = fadd float %4, %mul6
- store float %add10, float* %arrayidx9, align 4
+ store float %add10, ptr %arrayidx9, align 4
%5 = or i64 %indvars.iv, 2
- %arrayidx13 = getelementptr inbounds float, float* %b, i64 %5
- %6 = load float, float* %arrayidx13, align 4
+ %arrayidx13 = getelementptr inbounds float, ptr %b, i64 %5
+ %6 = load float, ptr %arrayidx13, align 4
%mul14 = fmul float %6, %alpha
- %arrayidx17 = getelementptr inbounds float, float* %a, i64 %5
- %7 = load float, float* %arrayidx17, align 4
+ %arrayidx17 = getelementptr inbounds float, ptr %a, i64 %5
+ %7 = load float, ptr %arrayidx17, align 4
%add18 = fadd float %7, %mul14
- store float %add18, float* %arrayidx17, align 4
+ store float %add18, ptr %arrayidx17, align 4
%8 = or i64 %indvars.iv, 3
- %arrayidx21 = getelementptr inbounds float, float* %b, i64 %8
- %9 = load float, float* %arrayidx21, align 4
+ %arrayidx21 = getelementptr inbounds float, ptr %b, i64 %8
+ %9 = load float, ptr %arrayidx21, align 4
%mul22 = fmul float %9, %alpha
- %arrayidx25 = getelementptr inbounds float, float* %a, i64 %8
- %10 = load float, float* %arrayidx25, align 4
+ %arrayidx25 = getelementptr inbounds float, ptr %a, i64 %8
+ %10 = load float, ptr %arrayidx25, align 4
%add26 = fadd float %10, %mul22
- store float %add26, float* %arrayidx25, align 4
+ store float %add26, ptr %arrayidx25, align 4
%11 = or i64 %indvars.iv, 4
- %arrayidx29 = getelementptr inbounds float, float* %b, i64 %11
- %12 = load float, float* %arrayidx29, align 4
+ %arrayidx29 = getelementptr inbounds float, ptr %b, i64 %11
+ %12 = load float, ptr %arrayidx29, align 4
%mul30 = fmul float %12, %alpha
- %arrayidx33 = getelementptr inbounds float, float* %a, i64 %11
- %13 = load float, float* %arrayidx33, align 4
+ %arrayidx33 = getelementptr inbounds float, ptr %a, i64 %11
+ %13 = load float, ptr %arrayidx33, align 4
%add34 = fadd float %13, %mul30
- store float %add34, float* %arrayidx33, align 4
+ store float %add34, ptr %arrayidx33, align 4
%14 = or i64 %indvars.iv, 5
- %arrayidx37 = getelementptr inbounds float, float* %b, i64 %14
- %15 = load float, float* %arrayidx37, align 4
+ %arrayidx37 = getelementptr inbounds float, ptr %b, i64 %14
+ %15 = load float, ptr %arrayidx37, align 4
%mul38 = fmul float %15, %alpha
- %arrayidx41 = getelementptr inbounds float, float* %a, i64 %14
- %16 = load float, float* %arrayidx41, align 4
+ %arrayidx41 = getelementptr inbounds float, ptr %a, i64 %14
+ %16 = load float, ptr %arrayidx41, align 4
%add42 = fadd float %16, %mul38
- store float %add42, float* %arrayidx41, align 4
+ store float %add42, ptr %arrayidx41, align 4
%17 = or i64 %indvars.iv, 6
- %arrayidx45 = getelementptr inbounds float, float* %b, i64 %17
- %18 = load float, float* %arrayidx45, align 4
+ %arrayidx45 = getelementptr inbounds float, ptr %b, i64 %17
+ %18 = load float, ptr %arrayidx45, align 4
%mul46 = fmul float %18, %alpha
- %arrayidx49 = getelementptr inbounds float, float* %a, i64 %17
- %19 = load float, float* %arrayidx49, align 4
+ %arrayidx49 = getelementptr inbounds float, ptr %a, i64 %17
+ %19 = load float, ptr %arrayidx49, align 4
%add50 = fadd float %19, %mul46
- store float %add50, float* %arrayidx49, align 4
+ store float %add50, ptr %arrayidx49, align 4
%20 = or i64 %indvars.iv, 7
- %arrayidx53 = getelementptr inbounds float, float* %b, i64 %20
- %21 = load float, float* %arrayidx53, align 4
+ %arrayidx53 = getelementptr inbounds float, ptr %b, i64 %20
+ %21 = load float, ptr %arrayidx53, align 4
%mul54 = fmul float %21, %alpha
- %arrayidx57 = getelementptr inbounds float, float* %a, i64 %20
- %22 = load float, float* %arrayidx57, align 4
+ %arrayidx57 = getelementptr inbounds float, ptr %a, i64 %20
+ %22 = load float, ptr %arrayidx57, align 4
%add58 = fadd float %22, %mul54
- store float %add58, float* %arrayidx57, align 4
+ store float %add58, ptr %arrayidx57, align 4
%23 = or i64 %indvars.iv, 8
- %arrayidx61 = getelementptr inbounds float, float* %b, i64 %23
- %24 = load float, float* %arrayidx61, align 4
+ %arrayidx61 = getelementptr inbounds float, ptr %b, i64 %23
+ %24 = load float, ptr %arrayidx61, align 4
%mul62 = fmul float %24, %alpha
- %arrayidx65 = getelementptr inbounds float, float* %a, i64 %23
- %25 = load float, float* %arrayidx65, align 4
+ %arrayidx65 = getelementptr inbounds float, ptr %a, i64 %23
+ %25 = load float, ptr %arrayidx65, align 4
%add66 = fadd float %25, %mul62
- store float %add66, float* %arrayidx65, align 4
+ store float %add66, ptr %arrayidx65, align 4
%26 = or i64 %indvars.iv, 9
- %arrayidx69 = getelementptr inbounds float, float* %b, i64 %26
- %27 = load float, float* %arrayidx69, align 4
+ %arrayidx69 = getelementptr inbounds float, ptr %b, i64 %26
+ %27 = load float, ptr %arrayidx69, align 4
%mul70 = fmul float %27, %alpha
- %arrayidx73 = getelementptr inbounds float, float* %a, i64 %26
- %28 = load float, float* %arrayidx73, align 4
+ %arrayidx73 = getelementptr inbounds float, ptr %a, i64 %26
+ %28 = load float, ptr %arrayidx73, align 4
%add74 = fadd float %28, %mul70
- store float %add74, float* %arrayidx73, align 4
+ store float %add74, ptr %arrayidx73, align 4
%29 = or i64 %indvars.iv, 10
- %arrayidx77 = getelementptr inbounds float, float* %b, i64 %29
- %30 = load float, float* %arrayidx77, align 4
+ %arrayidx77 = getelementptr inbounds float, ptr %b, i64 %29
+ %30 = load float, ptr %arrayidx77, align 4
%mul78 = fmul float %30, %alpha
- %arrayidx81 = getelementptr inbounds float, float* %a, i64 %29
- %31 = load float, float* %arrayidx81, align 4
+ %arrayidx81 = getelementptr inbounds float, ptr %a, i64 %29
+ %31 = load float, ptr %arrayidx81, align 4
%add82 = fadd float %31, %mul78
- store float %add82, float* %arrayidx81, align 4
+ store float %add82, ptr %arrayidx81, align 4
%32 = or i64 %indvars.iv, 11
- %arrayidx85 = getelementptr inbounds float, float* %b, i64 %32
- %33 = load float, float* %arrayidx85, align 4
+ %arrayidx85 = getelementptr inbounds float, ptr %b, i64 %32
+ %33 = load float, ptr %arrayidx85, align 4
%mul86 = fmul float %33, %alpha
- %arrayidx89 = getelementptr inbounds float, float* %a, i64 %32
- %34 = load float, float* %arrayidx89, align 4
+ %arrayidx89 = getelementptr inbounds float, ptr %a, i64 %32
+ %34 = load float, ptr %arrayidx89, align 4
%add90 = fadd float %34, %mul86
- store float %add90, float* %arrayidx89, align 4
+ store float %add90, ptr %arrayidx89, align 4
%35 = or i64 %indvars.iv, 12
- %arrayidx93 = getelementptr inbounds float, float* %b, i64 %35
- %36 = load float, float* %arrayidx93, align 4
+ %arrayidx93 = getelementptr inbounds float, ptr %b, i64 %35
+ %36 = load float, ptr %arrayidx93, align 4
%mul94 = fmul float %36, %alpha
- %arrayidx97 = getelementptr inbounds float, float* %a, i64 %35
- %37 = load float, float* %arrayidx97, align 4
+ %arrayidx97 = getelementptr inbounds float, ptr %a, i64 %35
+ %37 = load float, ptr %arrayidx97, align 4
%add98 = fadd float %37, %mul94
- store float %add98, float* %arrayidx97, align 4
+ store float %add98, ptr %arrayidx97, align 4
%38 = or i64 %indvars.iv, 13
- %arrayidx101 = getelementptr inbounds float, float* %b, i64 %38
- %39 = load float, float* %arrayidx101, align 4
+ %arrayidx101 = getelementptr inbounds float, ptr %b, i64 %38
+ %39 = load float, ptr %arrayidx101, align 4
%mul102 = fmul float %39, %alpha
- %arrayidx105 = getelementptr inbounds float, float* %a, i64 %38
- %40 = load float, float* %arrayidx105, align 4
+ %arrayidx105 = getelementptr inbounds float, ptr %a, i64 %38
+ %40 = load float, ptr %arrayidx105, align 4
%add106 = fadd float %40, %mul102
- store float %add106, float* %arrayidx105, align 4
+ store float %add106, ptr %arrayidx105, align 4
%41 = or i64 %indvars.iv, 14
- %arrayidx109 = getelementptr inbounds float, float* %b, i64 %41
- %42 = load float, float* %arrayidx109, align 4
+ %arrayidx109 = getelementptr inbounds float, ptr %b, i64 %41
+ %42 = load float, ptr %arrayidx109, align 4
%mul110 = fmul float %42, %alpha
- %arrayidx113 = getelementptr inbounds float, float* %a, i64 %41
- %43 = load float, float* %arrayidx113, align 4
+ %arrayidx113 = getelementptr inbounds float, ptr %a, i64 %41
+ %43 = load float, ptr %arrayidx113, align 4
%add114 = fadd float %43, %mul110
- store float %add114, float* %arrayidx113, align 4
+ store float %add114, ptr %arrayidx113, align 4
%44 = or i64 %indvars.iv, 15
- %arrayidx117 = getelementptr inbounds float, float* %b, i64 %44
- %45 = load float, float* %arrayidx117, align 4
+ %arrayidx117 = getelementptr inbounds float, ptr %b, i64 %44
+ %45 = load float, ptr %arrayidx117, align 4
%mul118 = fmul float %45, %alpha
- %arrayidx121 = getelementptr inbounds float, float* %a, i64 %44
- %46 = load float, float* %arrayidx121, align 4
+ %arrayidx121 = getelementptr inbounds float, ptr %a, i64 %44
+ %46 = load float, ptr %arrayidx121, align 4
%add122 = fadd float %46, %mul118
- store float %add122, float* %arrayidx121, align 4
+ store float %add122, ptr %arrayidx121, align 4
%47 = or i64 %indvars.iv, 16
- %arrayidx125 = getelementptr inbounds float, float* %b, i64 %47
- %48 = load float, float* %arrayidx125, align 4
+ %arrayidx125 = getelementptr inbounds float, ptr %b, i64 %47
+ %48 = load float, ptr %arrayidx125, align 4
%mul126 = fmul float %48, %alpha
- %arrayidx129 = getelementptr inbounds float, float* %a, i64 %47
- %49 = load float, float* %arrayidx129, align 4
+ %arrayidx129 = getelementptr inbounds float, ptr %a, i64 %47
+ %49 = load float, ptr %arrayidx129, align 4
%add130 = fadd float %49, %mul126
- store float %add130, float* %arrayidx129, align 4
+ store float %add130, ptr %arrayidx129, align 4
%50 = or i64 %indvars.iv, 17
- %arrayidx133 = getelementptr inbounds float, float* %b, i64 %50
- %51 = load float, float* %arrayidx133, align 4
+ %arrayidx133 = getelementptr inbounds float, ptr %b, i64 %50
+ %51 = load float, ptr %arrayidx133, align 4
%mul134 = fmul float %51, %alpha
- %arrayidx137 = getelementptr inbounds float, float* %a, i64 %50
- %52 = load float, float* %arrayidx137, align 4
+ %arrayidx137 = getelementptr inbounds float, ptr %a, i64 %50
+ %52 = load float, ptr %arrayidx137, align 4
%add138 = fadd float %52, %mul134
- store float %add138, float* %arrayidx137, align 4
+ store float %add138, ptr %arrayidx137, align 4
%53 = or i64 %indvars.iv, 18
- %arrayidx141 = getelementptr inbounds float, float* %b, i64 %53
- %54 = load float, float* %arrayidx141, align 4
+ %arrayidx141 = getelementptr inbounds float, ptr %b, i64 %53
+ %54 = load float, ptr %arrayidx141, align 4
%mul142 = fmul float %54, %alpha
- %arrayidx145 = getelementptr inbounds float, float* %a, i64 %53
- %55 = load float, float* %arrayidx145, align 4
+ %arrayidx145 = getelementptr inbounds float, ptr %a, i64 %53
+ %55 = load float, ptr %arrayidx145, align 4
%add146 = fadd float %55, %mul142
- store float %add146, float* %arrayidx145, align 4
+ store float %add146, ptr %arrayidx145, align 4
%56 = or i64 %indvars.iv, 19
- %arrayidx149 = getelementptr inbounds float, float* %b, i64 %56
- %57 = load float, float* %arrayidx149, align 4
+ %arrayidx149 = getelementptr inbounds float, ptr %b, i64 %56
+ %57 = load float, ptr %arrayidx149, align 4
%mul150 = fmul float %57, %alpha
- %arrayidx153 = getelementptr inbounds float, float* %a, i64 %56
- %58 = load float, float* %arrayidx153, align 4
+ %arrayidx153 = getelementptr inbounds float, ptr %a, i64 %56
+ %58 = load float, ptr %arrayidx153, align 4
%add154 = fadd float %58, %mul150
- store float %add154, float* %arrayidx153, align 4
+ store float %add154, ptr %arrayidx153, align 4
%59 = or i64 %indvars.iv, 20
- %arrayidx157 = getelementptr inbounds float, float* %b, i64 %59
- %60 = load float, float* %arrayidx157, align 4
+ %arrayidx157 = getelementptr inbounds float, ptr %b, i64 %59
+ %60 = load float, ptr %arrayidx157, align 4
%mul158 = fmul float %60, %alpha
- %arrayidx161 = getelementptr inbounds float, float* %a, i64 %59
- %61 = load float, float* %arrayidx161, align 4
+ %arrayidx161 = getelementptr inbounds float, ptr %a, i64 %59
+ %61 = load float, ptr %arrayidx161, align 4
%add162 = fadd float %61, %mul158
- store float %add162, float* %arrayidx161, align 4
+ store float %add162, ptr %arrayidx161, align 4
%62 = or i64 %indvars.iv, 21
- %arrayidx165 = getelementptr inbounds float, float* %b, i64 %62
- %63 = load float, float* %arrayidx165, align 4
+ %arrayidx165 = getelementptr inbounds float, ptr %b, i64 %62
+ %63 = load float, ptr %arrayidx165, align 4
%mul166 = fmul float %63, %alpha
- %arrayidx169 = getelementptr inbounds float, float* %a, i64 %62
- %64 = load float, float* %arrayidx169, align 4
+ %arrayidx169 = getelementptr inbounds float, ptr %a, i64 %62
+ %64 = load float, ptr %arrayidx169, align 4
%add170 = fadd float %64, %mul166
- store float %add170, float* %arrayidx169, align 4
+ store float %add170, ptr %arrayidx169, align 4
%65 = or i64 %indvars.iv, 22
- %arrayidx173 = getelementptr inbounds float, float* %b, i64 %65
- %66 = load float, float* %arrayidx173, align 4
+ %arrayidx173 = getelementptr inbounds float, ptr %b, i64 %65
+ %66 = load float, ptr %arrayidx173, align 4
%mul174 = fmul float %66, %alpha
- %arrayidx177 = getelementptr inbounds float, float* %a, i64 %65
- %67 = load float, float* %arrayidx177, align 4
+ %arrayidx177 = getelementptr inbounds float, ptr %a, i64 %65
+ %67 = load float, ptr %arrayidx177, align 4
%add178 = fadd float %67, %mul174
- store float %add178, float* %arrayidx177, align 4
+ store float %add178, ptr %arrayidx177, align 4
%68 = or i64 %indvars.iv, 23
- %arrayidx181 = getelementptr inbounds float, float* %b, i64 %68
- %69 = load float, float* %arrayidx181, align 4
+ %arrayidx181 = getelementptr inbounds float, ptr %b, i64 %68
+ %69 = load float, ptr %arrayidx181, align 4
%mul182 = fmul float %69, %alpha
- %arrayidx185 = getelementptr inbounds float, float* %a, i64 %68
- %70 = load float, float* %arrayidx185, align 4
+ %arrayidx185 = getelementptr inbounds float, ptr %a, i64 %68
+ %70 = load float, ptr %arrayidx185, align 4
%add186 = fadd float %70, %mul182
- store float %add186, float* %arrayidx185, align 4
+ store float %add186, ptr %arrayidx185, align 4
%71 = or i64 %indvars.iv, 24
- %arrayidx189 = getelementptr inbounds float, float* %b, i64 %71
- %72 = load float, float* %arrayidx189, align 4
+ %arrayidx189 = getelementptr inbounds float, ptr %b, i64 %71
+ %72 = load float, ptr %arrayidx189, align 4
%mul190 = fmul float %72, %alpha
- %arrayidx193 = getelementptr inbounds float, float* %a, i64 %71
- %73 = load float, float* %arrayidx193, align 4
+ %arrayidx193 = getelementptr inbounds float, ptr %a, i64 %71
+ %73 = load float, ptr %arrayidx193, align 4
%add194 = fadd float %73, %mul190
- store float %add194, float* %arrayidx193, align 4
+ store float %add194, ptr %arrayidx193, align 4
%74 = or i64 %indvars.iv, 25
- %arrayidx197 = getelementptr inbounds float, float* %b, i64 %74
- %75 = load float, float* %arrayidx197, align 4
+ %arrayidx197 = getelementptr inbounds float, ptr %b, i64 %74
+ %75 = load float, ptr %arrayidx197, align 4
%mul198 = fmul float %75, %alpha
- %arrayidx201 = getelementptr inbounds float, float* %a, i64 %74
- %76 = load float, float* %arrayidx201, align 4
+ %arrayidx201 = getelementptr inbounds float, ptr %a, i64 %74
+ %76 = load float, ptr %arrayidx201, align 4
%add202 = fadd float %76, %mul198
- store float %add202, float* %arrayidx201, align 4
+ store float %add202, ptr %arrayidx201, align 4
%77 = or i64 %indvars.iv, 26
- %arrayidx205 = getelementptr inbounds float, float* %b, i64 %77
- %78 = load float, float* %arrayidx205, align 4
+ %arrayidx205 = getelementptr inbounds float, ptr %b, i64 %77
+ %78 = load float, ptr %arrayidx205, align 4
%mul206 = fmul float %78, %alpha
- %arrayidx209 = getelementptr inbounds float, float* %a, i64 %77
- %79 = load float, float* %arrayidx209, align 4
+ %arrayidx209 = getelementptr inbounds float, ptr %a, i64 %77
+ %79 = load float, ptr %arrayidx209, align 4
%add210 = fadd float %79, %mul206
- store float %add210, float* %arrayidx209, align 4
+ store float %add210, ptr %arrayidx209, align 4
%80 = or i64 %indvars.iv, 27
- %arrayidx213 = getelementptr inbounds float, float* %b, i64 %80
- %81 = load float, float* %arrayidx213, align 4
+ %arrayidx213 = getelementptr inbounds float, ptr %b, i64 %80
+ %81 = load float, ptr %arrayidx213, align 4
%mul214 = fmul float %81, %alpha
- %arrayidx217 = getelementptr inbounds float, float* %a, i64 %80
- %82 = load float, float* %arrayidx217, align 4
+ %arrayidx217 = getelementptr inbounds float, ptr %a, i64 %80
+ %82 = load float, ptr %arrayidx217, align 4
%add218 = fadd float %82, %mul214
- store float %add218, float* %arrayidx217, align 4
+ store float %add218, ptr %arrayidx217, align 4
%83 = or i64 %indvars.iv, 28
- %arrayidx221 = getelementptr inbounds float, float* %b, i64 %83
- %84 = load float, float* %arrayidx221, align 4
+ %arrayidx221 = getelementptr inbounds float, ptr %b, i64 %83
+ %84 = load float, ptr %arrayidx221, align 4
%mul222 = fmul float %84, %alpha
- %arrayidx225 = getelementptr inbounds float, float* %a, i64 %83
- %85 = load float, float* %arrayidx225, align 4
+ %arrayidx225 = getelementptr inbounds float, ptr %a, i64 %83
+ %85 = load float, ptr %arrayidx225, align 4
%add226 = fadd float %85, %mul222
- store float %add226, float* %arrayidx225, align 4
+ store float %add226, ptr %arrayidx225, align 4
%86 = or i64 %indvars.iv, 29
- %arrayidx229 = getelementptr inbounds float, float* %b, i64 %86
- %87 = load float, float* %arrayidx229, align 4
+ %arrayidx229 = getelementptr inbounds float, ptr %b, i64 %86
+ %87 = load float, ptr %arrayidx229, align 4
%mul230 = fmul float %87, %alpha
- %arrayidx233 = getelementptr inbounds float, float* %a, i64 %86
- %88 = load float, float* %arrayidx233, align 4
+ %arrayidx233 = getelementptr inbounds float, ptr %a, i64 %86
+ %88 = load float, ptr %arrayidx233, align 4
%add234 = fadd float %88, %mul230
- store float %add234, float* %arrayidx233, align 4
+ store float %add234, ptr %arrayidx233, align 4
%89 = or i64 %indvars.iv, 30
- %arrayidx237 = getelementptr inbounds float, float* %b, i64 %89
- %90 = load float, float* %arrayidx237, align 4
+ %arrayidx237 = getelementptr inbounds float, ptr %b, i64 %89
+ %90 = load float, ptr %arrayidx237, align 4
%mul238 = fmul float %90, %alpha
- %arrayidx241 = getelementptr inbounds float, float* %a, i64 %89
- %91 = load float, float* %arrayidx241, align 4
+ %arrayidx241 = getelementptr inbounds float, ptr %a, i64 %89
+ %91 = load float, ptr %arrayidx241, align 4
%add242 = fadd float %91, %mul238
- store float %add242, float* %arrayidx241, align 4
+ store float %add242, ptr %arrayidx241, align 4
%92 = or i64 %indvars.iv, 31
- %arrayidx245 = getelementptr inbounds float, float* %b, i64 %92
- %93 = load float, float* %arrayidx245, align 4
+ %arrayidx245 = getelementptr inbounds float, ptr %b, i64 %92
+ %93 = load float, ptr %arrayidx245, align 4
%mul246 = fmul float %93, %alpha
- %arrayidx249 = getelementptr inbounds float, float* %a, i64 %92
- %94 = load float, float* %arrayidx249, align 4
+ %arrayidx249 = getelementptr inbounds float, ptr %a, i64 %92
+ %94 = load float, ptr %arrayidx249, align 4
%add250 = fadd float %94, %mul246
- store float %add250, float* %arrayidx249, align 4
+ store float %add250, ptr %arrayidx249, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 32
%cmp = icmp slt i64 %indvars.iv.next, 3200
br i1 %cmp, label %for.body, label %for.end
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
-; CHECK: %arrayidx = getelementptr inbounds float, float* %b, i64 %indvar
-; CHECK: %0 = load float, float* %arrayidx, align 4
+; CHECK: %arrayidx = getelementptr inbounds float, ptr %b, i64 %indvar
+; CHECK: %0 = load float, ptr %arrayidx, align 4
; CHECK: %mul = fmul float %0, %alpha
-; CHECK: %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvar
-; CHECK: %1 = load float, float* %arrayidx2, align 4
+; CHECK: %arrayidx2 = getelementptr inbounds float, ptr %a, i64 %indvar
+; CHECK: %1 = load float, ptr %arrayidx2, align 4
; CHECK: %add = fadd float %1, %mul
-; CHECK: store float %add, float* %arrayidx2, align 4
+; CHECK: store float %add, ptr %arrayidx2, align 4
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i64 %indvar, 3199
; CHECK: br i1 %exitcond, label %for.end, label %for.body
; RUN: opt -S -passes=loop-reroll %s | FileCheck %s
target triple = "aarch64--linux-gnu"
-define void @test(i32 %n, float* %arrayidx200, float* %arrayidx164, float* %arrayidx172) {
+define void @test(i32 %n, ptr %arrayidx200, ptr %arrayidx164, ptr %arrayidx172) {
entry:
%rem.i = srem i32 %n, 4
- %t22 = load float, float* %arrayidx172, align 4
+ %t22 = load float, ptr %arrayidx172, align 4
%cmp.9 = icmp eq i32 %n, 0
%t7 = sext i32 %n to i64
br i1 %cmp.9, label %while.end, label %while.body.preheader
while.body:
;CHECK-LABEL: while.body:
;CHECK-NEXT: %indvar = phi i64 [ %indvar.next, %while.body ], [ 0, %while.body.preheader ]
-;CHECK-NEXT: %arrayidx62.i = getelementptr inbounds float, float* %arrayidx200, i64 %indvar
-;CHECK-NEXT: %t1 = load float, float* %arrayidx62.i, align 4
-;CHECK-NEXT: %arrayidx64.i = getelementptr inbounds float, float* %arrayidx164, i64 %indvar
-;CHECK-NEXT: %t2 = load float, float* %arrayidx64.i, align 4
+;CHECK-NEXT: %arrayidx62.i = getelementptr inbounds float, ptr %arrayidx200, i64 %indvar
+;CHECK-NEXT: %t1 = load float, ptr %arrayidx62.i, align 4
+;CHECK-NEXT: %arrayidx64.i = getelementptr inbounds float, ptr %arrayidx164, i64 %indvar
+;CHECK-NEXT: %t2 = load float, ptr %arrayidx64.i, align 4
;CHECK-NEXT: %mul65.i = fmul fast float %t2, %t22
;CHECK-NEXT: %add66.i = fadd fast float %mul65.i, %t1
-;CHECK-NEXT: store float %add66.i, float* %arrayidx62.i, align 4
+;CHECK-NEXT: store float %add66.i, ptr %arrayidx62.i, align 4
;CHECK-NEXT: %indvar.next = add i64 %indvar, 1
;CHECK-NEXT: %exitcond = icmp eq i64 %indvar, %{{[0-9]+}}
;CHECK-NEXT: br i1 %exitcond, label %while.end.loopexit, label %while.body
%indvars.iv.i423 = phi i64 [ %indvars.iv.next.i424, %while.body ], [ 0, %while.body.preheader ]
%i.22.i = phi i32 [ %add103.i, %while.body ], [ %rem.i, %while.body.preheader ]
- %arrayidx62.i = getelementptr inbounds float, float* %arrayidx200, i64 %indvars.iv.i423
- %t1 = load float, float* %arrayidx62.i, align 4
- %arrayidx64.i = getelementptr inbounds float, float* %arrayidx164, i64 %indvars.iv.i423
- %t2 = load float, float* %arrayidx64.i, align 4
+ %arrayidx62.i = getelementptr inbounds float, ptr %arrayidx200, i64 %indvars.iv.i423
+ %t1 = load float, ptr %arrayidx62.i, align 4
+ %arrayidx64.i = getelementptr inbounds float, ptr %arrayidx164, i64 %indvars.iv.i423
+ %t2 = load float, ptr %arrayidx64.i, align 4
%mul65.i = fmul fast float %t2, %t22
%add66.i = fadd fast float %mul65.i, %t1
- store float %add66.i, float* %arrayidx62.i, align 4
+ store float %add66.i, ptr %arrayidx62.i, align 4
%t3 = add nsw i64 %indvars.iv.i423, 1
- %arrayidx71.i = getelementptr inbounds float, float* %arrayidx200, i64 %t3
- %t4 = load float, float* %arrayidx71.i, align 4
- %arrayidx74.i = getelementptr inbounds float, float* %arrayidx164, i64 %t3
- %t5 = load float, float* %arrayidx74.i, align 4
+ %arrayidx71.i = getelementptr inbounds float, ptr %arrayidx200, i64 %t3
+ %t4 = load float, ptr %arrayidx71.i, align 4
+ %arrayidx74.i = getelementptr inbounds float, ptr %arrayidx164, i64 %t3
+ %t5 = load float, ptr %arrayidx74.i, align 4
%mul75.i = fmul fast float %t5, %t22
%add76.i = fadd fast float %mul75.i, %t4
- store float %add76.i, float* %arrayidx71.i, align 4
+ store float %add76.i, ptr %arrayidx71.i, align 4
%add103.i = add nsw i32 %i.22.i, 2
%t6 = sext i32 %add103.i to i64
%cmp58.i = icmp slt i64 %t6, %t7
}
; Function Attrs: noinline norecurse nounwind
-define i32 @test2(i64 %n, i32* nocapture %x, i32* nocapture readonly %y) {
+define i32 @test2(i64 %n, ptr nocapture %x, ptr nocapture readonly %y) {
entry:
%cmp18 = icmp sgt i64 %n, 0
br i1 %cmp18, label %for.body.preheader, label %for.end
;CHECK-LABEL: for.body:
;CHECK-NEXT: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
-;CHECK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvar
-;CHECK-NEXT: [[T1:%[0-9]+]] = load i32, i32* %arrayidx, align 4
-;CHECK-NEXT: %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %indvar
-;CHECK-NEXT: store i32 [[T1]], i32* %arrayidx3, align 4
+;CHECK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvar
+;CHECK-NEXT: [[T1:%[0-9]+]] = load i32, ptr %arrayidx, align 4
+;CHECK-NEXT: %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvar
+;CHECK-NEXT: store i32 [[T1]], ptr %arrayidx3, align 4
;CHECK-NEXT: %indvar.next = add i64 %indvar, 1
;CHECK-NEXT: %exitcond = icmp eq i64 %indvar, %{{[0-9]+}}
;CHECK-NEXT: br i1 %exitcond, label %for.end.loopexit, label %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- store i32 %0, i32* %arrayidx3, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+ store i32 %0, ptr %arrayidx3, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx5 = getelementptr inbounds i32, i32* %y, i64 %1
- %2 = load i32, i32* %arrayidx5, align 4
- %arrayidx8 = getelementptr inbounds i32, i32* %x, i64 %1
- store i32 %2, i32* %arrayidx8, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %y, i64 %1
+ %2 = load i32, ptr %arrayidx5, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %x, i64 %1
+ store i32 %2, ptr %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp slt i64 %indvars.iv.next, %n
br i1 %cmp, label %for.body, label %for.end.loopexit
}
; Function Attrs: noinline norecurse nounwind
-define i32 @test3(i32 %n, i32* nocapture %x, i32* nocapture readonly %y) {
+define i32 @test3(i32 %n, ptr nocapture %x, ptr nocapture readonly %y) {
entry:
%cmp21 = icmp sgt i32 %n, 0
br i1 %cmp21, label %for.body.preheader, label %for.end
%conv23 = phi i32 [ %conv, %for.body ], [ 0, %for.body.preheader ]
%i.022 = phi i8 [ %add12, %for.body ], [ 0, %for.body.preheader ]
%idxprom = sext i8 %i.022 to i64
- %arrayidx = getelementptr inbounds i32, i32* %y, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %idxprom
- store i32 %0, i32* %arrayidx3, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %y, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %idxprom
+ store i32 %0, ptr %arrayidx3, align 4
%add = or i32 %conv23, 1
%idxprom5 = sext i32 %add to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %y, i64 %idxprom5
- %1 = load i32, i32* %arrayidx6, align 4
- %arrayidx10 = getelementptr inbounds i32, i32* %x, i64 %idxprom5
- store i32 %1, i32* %arrayidx10, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %y, i64 %idxprom5
+ %1 = load i32, ptr %arrayidx6, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %x, i64 %idxprom5
+ store i32 %1, ptr %arrayidx10, align 4
%add12 = add i8 %i.022, 2
%conv = sext i8 %add12 to i32
%cmp = icmp slt i32 %conv, %n
}
; Function Attrs: noinline norecurse nounwind
-define i32 @test4(i64 %n, i32* nocapture %x, i32* nocapture readonly %y) {
+define i32 @test4(i64 %n, ptr nocapture %x, ptr nocapture readonly %y) {
entry:
%cmp18 = icmp eq i64 %n, 0
br i1 %cmp18, label %for.end, label %for.body.preheader
;CHECK-LABEL: for.body:
;CHECK-NEXT: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
-;CHECK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvar
-;CHECK-NEXT: [[T1:%[0-9]+]] = load i32, i32* %arrayidx, align 4
-;CHECK-NEXT: %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %indvar
-;CHECK-NEXT: store i32 [[T1]], i32* %arrayidx3, align 4
+;CHECK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvar
+;CHECK-NEXT: [[T1:%[0-9]+]] = load i32, ptr %arrayidx, align 4
+;CHECK-NEXT: %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvar
+;CHECK-NEXT: store i32 [[T1]], ptr %arrayidx3, align 4
;CHECK-NEXT: %indvar.next = add i64 %indvar, 1
;CHECK-NEXT: %exitcond = icmp eq i64 %indvar, %{{[0-9]+}}
;CHECK-NEXT: br i1 %exitcond, label %for.end.loopexit, label %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- store i32 %0, i32* %arrayidx3, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+ store i32 %0, ptr %arrayidx3, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx5 = getelementptr inbounds i32, i32* %y, i64 %1
- %2 = load i32, i32* %arrayidx5, align 4
- %arrayidx8 = getelementptr inbounds i32, i32* %x, i64 %1
- store i32 %2, i32* %arrayidx8, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %y, i64 %1
+ %2 = load i32, ptr %arrayidx5, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %x, i64 %1
+ store i32 %2, ptr %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, %n
br i1 %cmp, label %for.body, label %for.end.loopexit
target triple = "aarch64--linux-gnu"
@buf = global [16 x i8] c"\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A\0A", align 1
-define i32 @test1(i32 %len, i8* nocapture readonly %buf) #0 {
+define i32 @test1(i32 %len, ptr nocapture readonly %buf) #0 {
entry:
%cmp.13 = icmp sgt i32 %len, 1
br i1 %cmp.13, label %while.body.lr.ph, label %while.end
%sum4.015 = phi i64 [ 0, %while.body.lr.ph ], [ %add4, %while.body ]
%len.addr.014 = phi i32 [ %len, %while.body.lr.ph ], [ %sub5, %while.body ]
%idxprom = sext i32 %len.addr.014 to i64
- %arrayidx = getelementptr inbounds i8, i8* %buf, i64 %idxprom
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %buf, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i64
%add = add i64 %conv, %sum4.015
%sub = add nsw i32 %len.addr.014, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds i8, i8* %buf, i64 %idxprom1
- %1 = load i8, i8* %arrayidx2, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %buf, i64 %idxprom1
+ %1 = load i8, ptr %arrayidx2, align 1
%conv3 = zext i8 %1 to i64
%add4 = add i64 %add, %conv3
%sub5 = add nsw i32 %len.addr.014, -2
; A[i+3] = B[i+3] * 4;
; }
;}
-define void @foo(i32* nocapture %A, i32* nocapture readonly %B, i32 %m, i32 %n) {
+define void @foo(ptr nocapture %A, ptr nocapture readonly %B, i32 %m, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP34:%.*]] = icmp slt i32 [[M:%.*]], [[N:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[M]], [[INDVAR]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP7]], 2
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[TMP6]]
-; CHECK-NEXT: store i32 [[MUL]], i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP6]]
+; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVAR]], [[TMP5]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body: ; preds = %entry, %for.body
%i.035 = phi i32 [ %add18, %for.body ], [ %m, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.035
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %i.035
+ %0 = load i32, ptr %arrayidx, align 4
%mul = shl nsw i32 %0, 2
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.035
- store i32 %mul, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i32 %i.035
+ store i32 %mul, ptr %arrayidx2, align 4
%add3 = add nsw i32 %i.035, 1
- %arrayidx4 = getelementptr inbounds i32, i32* %B, i32 %add3
- %1 = load i32, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %B, i32 %add3
+ %1 = load i32, ptr %arrayidx4, align 4
%mul5 = shl nsw i32 %1, 2
- %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %add3
- store i32 %mul5, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %A, i32 %add3
+ store i32 %mul5, ptr %arrayidx7, align 4
%add8 = add nsw i32 %i.035, 2
- %arrayidx9 = getelementptr inbounds i32, i32* %B, i32 %add8
- %2 = load i32, i32* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds i32, ptr %B, i32 %add8
+ %2 = load i32, ptr %arrayidx9, align 4
%mul10 = shl nsw i32 %2, 2
- %arrayidx12 = getelementptr inbounds i32, i32* %A, i32 %add8
- store i32 %mul10, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds i32, ptr %A, i32 %add8
+ store i32 %mul10, ptr %arrayidx12, align 4
%add13 = add nsw i32 %i.035, 3
- %arrayidx14 = getelementptr inbounds i32, i32* %B, i32 %add13
- %3 = load i32, i32* %arrayidx14, align 4
+ %arrayidx14 = getelementptr inbounds i32, ptr %B, i32 %add13
+ %3 = load i32, ptr %arrayidx14, align 4
%mul15 = shl nsw i32 %3, 2
- %arrayidx17 = getelementptr inbounds i32, i32* %A, i32 %add13
- store i32 %mul15, i32* %arrayidx17, align 4
+ %arrayidx17 = getelementptr inbounds i32, ptr %A, i32 %add13
+ store i32 %mul15, ptr %arrayidx17, align 4
%add18 = add nsw i32 %i.035, 4
%cmp = icmp slt i32 %add18, %n
br i1 %cmp, label %for.body, label %for.end
ret void
}
-;void daxpy_ur(int n,float da,float *dx,float *dy)
+;void daxpy_ur(int n,float da,float *dx,float *dy)
; {
; int m = n % 4;
; for (int i = m; i < n; i = i + 4)
; dy[i+3] = dy[i+3] + da*dx[i+3];
; }
; }
-define void @daxpy_ur(i32 %n, float %da, float* nocapture readonly %dx, float* nocapture %dy) {
+define void @daxpy_ur(i32 %n, float %da, ptr nocapture readonly %dx, ptr nocapture %dy) {
; CHECK-LABEL: @daxpy_ur(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REM:%.*]] = srem i32 [[N:%.*]], 4
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[REM]], [[INDVAR]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[DY:%.*]], i32 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[DX:%.*]], i32 [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[DY:%.*]], i32 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[DX:%.*]], i32 [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP7]], [[DA:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP6]], [[MUL]]
-; CHECK-NEXT: store float [[ADD]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVAR]], [[TMP4]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
for.body: ; preds = %entry, %for.body
%i.056 = phi i32 [ %add27, %for.body ], [ %rem, %entry ]
- %arrayidx = getelementptr inbounds float, float* %dy, i32 %i.056
- %0 = load float, float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float, float* %dx, i32 %i.056
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds float, ptr %dy, i32 %i.056
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %dx, i32 %i.056
+ %1 = load float, ptr %arrayidx1, align 4
%mul = fmul float %1, %da
%add = fadd float %0, %mul
- store float %add, float* %arrayidx, align 4
+ store float %add, ptr %arrayidx, align 4
%add3 = add nsw i32 %i.056, 1
- %arrayidx4 = getelementptr inbounds float, float* %dy, i32 %add3
- %2 = load float, float* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds float, float* %dx, i32 %add3
- %3 = load float, float* %arrayidx6, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %dy, i32 %add3
+ %2 = load float, ptr %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds float, ptr %dx, i32 %add3
+ %3 = load float, ptr %arrayidx6, align 4
%mul7 = fmul float %3, %da
%add8 = fadd float %2, %mul7
- store float %add8, float* %arrayidx4, align 4
+ store float %add8, ptr %arrayidx4, align 4
%add11 = add nsw i32 %i.056, 2
- %arrayidx12 = getelementptr inbounds float, float* %dy, i32 %add11
- %4 = load float, float* %arrayidx12, align 4
- %arrayidx14 = getelementptr inbounds float, float* %dx, i32 %add11
- %5 = load float, float* %arrayidx14, align 4
+ %arrayidx12 = getelementptr inbounds float, ptr %dy, i32 %add11
+ %4 = load float, ptr %arrayidx12, align 4
+ %arrayidx14 = getelementptr inbounds float, ptr %dx, i32 %add11
+ %5 = load float, ptr %arrayidx14, align 4
%mul15 = fmul float %5, %da
%add16 = fadd float %4, %mul15
- store float %add16, float* %arrayidx12, align 4
+ store float %add16, ptr %arrayidx12, align 4
%add19 = add nsw i32 %i.056, 3
- %arrayidx20 = getelementptr inbounds float, float* %dy, i32 %add19
- %6 = load float, float* %arrayidx20, align 4
- %arrayidx22 = getelementptr inbounds float, float* %dx, i32 %add19
- %7 = load float, float* %arrayidx22, align 4
+ %arrayidx20 = getelementptr inbounds float, ptr %dy, i32 %add19
+ %6 = load float, ptr %arrayidx20, align 4
+ %arrayidx22 = getelementptr inbounds float, ptr %dx, i32 %add19
+ %7 = load float, ptr %arrayidx22, align 4
%mul23 = fmul float %7, %da
%add24 = fadd float %6, %mul23
- store float %add24, float* %arrayidx20, align 4
+ store float %add24, ptr %arrayidx20, align 4
%add27 = add nsw i32 %i.056, 4
%cmp = icmp slt i32 %add27, %n
br i1 %cmp, label %for.body, label %for.end
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @foo(i32* nocapture readonly %x) #0 {
+define i32 @foo(ptr nocapture readonly %x) #0 {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %r.029
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %1
+ %2 = load i32, ptr %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
- %4 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %x, i64 %3
+ %4 = load i32, ptr %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
- %6 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %x, i64 %5
+ %6 = load i32, ptr %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvar
-; CHECK: %1 = load i32, i32* %arrayidx, align 4
+; CHECK: %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvar
+; CHECK: %1 = load i32, ptr %arrayidx, align 4
; CHECK: %add = add nsw i32 %1, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i32 %0, 399
ret i32 %add12
}
-define float @bar(float* nocapture readonly %x) #0 {
+define float @bar(ptr nocapture readonly %x) #0 {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi float [ 0.0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
%add = fadd float %0, %r.029
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds float, float* %x, i64 %1
- %2 = load float, float* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds float, ptr %x, i64 %1
+ %2 = load float, ptr %arrayidx3, align 4
%add4 = fadd float %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds float, float* %x, i64 %3
- %4 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %x, i64 %3
+ %4 = load float, ptr %arrayidx7, align 4
%add8 = fadd float %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds float, float* %x, i64 %5
- %6 = load float, float* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds float, ptr %x, i64 %5
+ %6 = load float, ptr %arrayidx11, align 4
%add12 = fadd float %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds float, float* %x, i64 %indvar
-; CHECK: %1 = load float, float* %arrayidx, align 4
+; CHECK: %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvar
+; CHECK: %1 = load float, ptr %arrayidx, align 4
; CHECK: %add = fadd float %1, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i32 %0, 399
ret float %add12
}
-define i32 @foo_unusedphi(i32* nocapture readonly %x) #0 {
+define i32 @foo_unusedphi(ptr nocapture readonly %x) #0 {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %0
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %1
+ %2 = load i32, ptr %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
- %4 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %x, i64 %3
+ %4 = load i32, ptr %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
- %6 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %x, i64 %5
+ %6 = load i32, ptr %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
;RUN: opt < %s -passes=loop-reroll -S | FileCheck %s
-;void foo(float * restrict a, float * restrict b, int n) {
+;void foo(float * restrict a, float * restrict b, int n) {
; for(int i = 0; i < n; i+=4) {
; a[i] = b[i];
; a[i+1] = b[i+1];
target triple = "armv4t--linux-gnueabi"
; Function Attrs: nounwind
-define void @foo(float* noalias nocapture %a, float* noalias nocapture readonly %b, i32 %n) #0 !dbg !4 {
+define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i32 %n) #0 !dbg !4 {
entry:
;CHECK-LABEL: @foo
- tail call void @llvm.dbg.value(metadata float* %a, metadata !12, metadata !22), !dbg !23
- tail call void @llvm.dbg.value(metadata float* %b, metadata !13, metadata !22), !dbg !24
+ tail call void @llvm.dbg.value(metadata ptr %a, metadata !12, metadata !22), !dbg !23
+ tail call void @llvm.dbg.value(metadata ptr %b, metadata !13, metadata !22), !dbg !24
tail call void @llvm.dbg.value(metadata i32 %n, metadata !14, metadata !22), !dbg !25
tail call void @llvm.dbg.value(metadata i32 0, metadata !15, metadata !22), !dbg !26
%cmp.30 = icmp sgt i32 %n, 0, !dbg !27
;CHECK: %indvar.next = add i32 %indvar, 1
;CHECK: icmp eq i32 %indvar
%i.031 = phi i32 [ %add13, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds float, float* %b, i32 %i.031, !dbg !30
- %0 = bitcast float* %arrayidx to i32*, !dbg !30
- %1 = load i32, i32* %0, align 4, !dbg !30, !tbaa !33
- %arrayidx1 = getelementptr inbounds float, float* %a, i32 %i.031, !dbg !37
- %2 = bitcast float* %arrayidx1 to i32*, !dbg !38
- store i32 %1, i32* %2, align 4, !dbg !38, !tbaa !33
+ %arrayidx = getelementptr inbounds float, ptr %b, i32 %i.031, !dbg !30
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !30, !tbaa !33
+ %arrayidx1 = getelementptr inbounds float, ptr %a, i32 %i.031, !dbg !37
+ store i32 %0, ptr %arrayidx1, align 4, !dbg !38, !tbaa !33
%add = or i32 %i.031, 1, !dbg !39
- %arrayidx2 = getelementptr inbounds float, float* %b, i32 %add, !dbg !40
- %3 = bitcast float* %arrayidx2 to i32*, !dbg !40
- %4 = load i32, i32* %3, align 4, !dbg !40, !tbaa !33
- %arrayidx4 = getelementptr inbounds float, float* %a, i32 %add, !dbg !41
- %5 = bitcast float* %arrayidx4 to i32*, !dbg !42
- store i32 %4, i32* %5, align 4, !dbg !42, !tbaa !33
+ %arrayidx2 = getelementptr inbounds float, ptr %b, i32 %add, !dbg !40
+ %1 = load i32, ptr %arrayidx2, align 4, !dbg !40, !tbaa !33
+ %arrayidx4 = getelementptr inbounds float, ptr %a, i32 %add, !dbg !41
+ store i32 %1, ptr %arrayidx4, align 4, !dbg !42, !tbaa !33
%add5 = or i32 %i.031, 2, !dbg !43
- %arrayidx6 = getelementptr inbounds float, float* %b, i32 %add5, !dbg !44
- %6 = bitcast float* %arrayidx6 to i32*, !dbg !44
- %7 = load i32, i32* %6, align 4, !dbg !44, !tbaa !33
- %arrayidx8 = getelementptr inbounds float, float* %a, i32 %add5, !dbg !45
- %8 = bitcast float* %arrayidx8 to i32*, !dbg !46
- store i32 %7, i32* %8, align 4, !dbg !46, !tbaa !33
+ %arrayidx6 = getelementptr inbounds float, ptr %b, i32 %add5, !dbg !44
+ %2 = load i32, ptr %arrayidx6, align 4, !dbg !44, !tbaa !33
+ %arrayidx8 = getelementptr inbounds float, ptr %a, i32 %add5, !dbg !45
+ store i32 %2, ptr %arrayidx8, align 4, !dbg !46, !tbaa !33
%add9 = or i32 %i.031, 3, !dbg !47
- %arrayidx10 = getelementptr inbounds float, float* %b, i32 %add9, !dbg !48
- %9 = bitcast float* %arrayidx10 to i32*, !dbg !48
- %10 = load i32, i32* %9, align 4, !dbg !48, !tbaa !33
- %arrayidx12 = getelementptr inbounds float, float* %a, i32 %add9, !dbg !49
- %11 = bitcast float* %arrayidx12 to i32*, !dbg !50
- store i32 %10, i32* %11, align 4, !dbg !50, !tbaa !33
+ %arrayidx10 = getelementptr inbounds float, ptr %b, i32 %add9, !dbg !48
+ %3 = load i32, ptr %arrayidx10, align 4, !dbg !48, !tbaa !33
+ %arrayidx12 = getelementptr inbounds float, ptr %a, i32 %add9, !dbg !49
+ store i32 %3, ptr %arrayidx12, align 4, !dbg !50, !tbaa !33
%add13 = add nuw nsw i32 %i.031, 4, !dbg !51
tail call void @llvm.dbg.value(metadata i32 %add13, metadata !15, metadata !22), !dbg !26
%cmp = icmp slt i32 %add13, %n, !dbg !27
; RUN: opt < %s -passes=instcombine,simplifycfg,licm -simplifycfg-require-and-preserve-domtree=1 -disable-output
target datalayout = "e-p:32:32"
-@yy_base = external global [787 x i16] ; <[787 x i16]*> [#uses=1]
-@yy_state_ptr = external global i32* ; <i32**> [#uses=3]
-@yy_state_buf = external global [16386 x i32] ; <[16386 x i32]*> [#uses=1]
-@yy_lp = external global i32 ; <i32*> [#uses=1]
+@yy_base = external global [787 x i16] ; <ptr> [#uses=1]
+@yy_state_ptr = external global ptr ; <ptr> [#uses=3]
+@yy_state_buf = external global [16386 x i32] ; <ptr> [#uses=1]
+@yy_lp = external global i32 ; <ptr> [#uses=1]
define i32 @_yylex() {
br label %loopentry.0
loopentry.0: ; preds = %else.26, %0
- store i32* getelementptr ([16386 x i32], [16386 x i32]* @yy_state_buf, i64 0, i64 0), i32** @yy_state_ptr
- %tmp.35 = load i32*, i32** @yy_state_ptr ; <i32*> [#uses=2]
- %inc.0 = getelementptr i32, i32* %tmp.35, i64 1 ; <i32*> [#uses=1]
- store i32* %inc.0, i32** @yy_state_ptr
- %tmp.36 = load i32, i32* null ; <i32> [#uses=1]
- store i32 %tmp.36, i32* %tmp.35
+ store ptr @yy_state_buf, ptr @yy_state_ptr
+ %tmp.35 = load ptr, ptr @yy_state_ptr ; <ptr> [#uses=2]
+ %inc.0 = getelementptr i32, ptr %tmp.35, i64 1 ; <ptr> [#uses=1]
+ store ptr %inc.0, ptr @yy_state_ptr
+ %tmp.36 = load i32, ptr null ; <i32> [#uses=1]
+ store i32 %tmp.36, ptr %tmp.35
br label %loopexit.2
loopexit.2: ; preds = %else.26, %loopexit.2, %loopentry.0
- store i8* null, i8** null
- %tmp.91 = load i32, i32* null ; <i32> [#uses=1]
+ store ptr null, ptr null
+ %tmp.91 = load i32, ptr null ; <i32> [#uses=1]
%tmp.92 = sext i32 %tmp.91 to i64 ; <i64> [#uses=1]
- %tmp.93 = getelementptr [787 x i16], [787 x i16]* @yy_base, i64 0, i64 %tmp.92 ; <i16*> [#uses=1]
- %tmp.94 = load i16, i16* %tmp.93 ; <i16> [#uses=1]
+ %tmp.93 = getelementptr [787 x i16], ptr @yy_base, i64 0, i64 %tmp.92 ; <ptr> [#uses=1]
+ %tmp.94 = load i16, ptr %tmp.93 ; <i16> [#uses=1]
%tmp.95 = icmp ne i16 %tmp.94, 4394 ; <i1> [#uses=1]
br i1 %tmp.95, label %loopexit.2, label %yy_find_action
yy_find_action: ; preds = %else.26, %loopexit.2
br label %loopentry.3
loopentry.3: ; preds = %then.9, %shortcirc_done.0, %yy_find_action
- %tmp.105 = load i32, i32* @yy_lp ; <i32> [#uses=1]
+ %tmp.105 = load i32, ptr @yy_lp ; <i32> [#uses=1]
%tmp.106 = icmp ne i32 %tmp.105, 0 ; <i1> [#uses=1]
br i1 %tmp.106, label %shortcirc_next.0, label %shortcirc_done.0
shortcirc_next.0: ; preds = %loopentry.3
- %tmp.114 = load i16, i16* null ; <i16> [#uses=1]
+ %tmp.114 = load i16, ptr null ; <i16> [#uses=1]
%tmp.115 = sext i16 %tmp.114 to i32 ; <i32> [#uses=1]
%tmp.116 = icmp slt i32 0, %tmp.115 ; <i1> [#uses=1]
br label %shortcirc_done.0
%shortcirc_val.0 = phi i1 [ false, %loopentry.3 ], [ %tmp.116, %shortcirc_next.0 ] ; <i1> [#uses=1]
br i1 %shortcirc_val.0, label %else.0, label %loopentry.3
else.0: ; preds = %shortcirc_done.0
- %tmp.144 = load i32, i32* null ; <i32> [#uses=1]
+ %tmp.144 = load i32, ptr null ; <i32> [#uses=1]
%tmp.145 = and i32 %tmp.144, 8192 ; <i32> [#uses=1]
%tmp.146 = icmp ne i32 %tmp.145, 0 ; <i1> [#uses=1]
br i1 %tmp.146, label %then.9, label %else.26
; RUN: opt < %s -passes=licm -disable-output
-@G = weak global i32 0 ; <i32*> [#uses=7]
+@G = weak global i32 0 ; <ptr> [#uses=7]
define i32 @main() {
entry:
- store i32 123, i32* @G
+ store i32 123, ptr @G
br label %loopentry.i
loopentry.i: ; preds = %endif.1.i, %entry
- %tmp.0.i = load i32, i32* @G ; <i32> [#uses=1]
+ %tmp.0.i = load i32, ptr @G ; <i32> [#uses=1]
%tmp.1.i = icmp eq i32 %tmp.0.i, 123 ; <i1> [#uses=1]
br i1 %tmp.1.i, label %Out.i, label %endif.0.i
endif.0.i: ; preds = %loopentry.i
- %tmp.3.i = load i32, i32* @G ; <i32> [#uses=1]
+ %tmp.3.i = load i32, ptr @G ; <i32> [#uses=1]
%tmp.4.i = icmp eq i32 %tmp.3.i, 126 ; <i1> [#uses=1]
br i1 %tmp.4.i, label %ExitBlock.i, label %endif.1.i
endif.1.i: ; preds = %endif.0.i
- %tmp.6.i = load i32, i32* @G ; <i32> [#uses=1]
+ %tmp.6.i = load i32, ptr @G ; <i32> [#uses=1]
%inc.i = add i32 %tmp.6.i, 1 ; <i32> [#uses=1]
- store i32 %inc.i, i32* @G
+ store i32 %inc.i, ptr @G
br label %loopentry.i
Out.i: ; preds = %loopentry.i
- store i32 0, i32* @G
+ store i32 0, ptr @G
br label %ExitBlock.i
ExitBlock.i: ; preds = %Out.i, %endif.0.i
- %tmp.7.i = load i32, i32* @G ; <i32> [#uses=1]
+ %tmp.7.i = load i32, ptr @G ; <i32> [#uses=1]
ret i32 %tmp.7.i
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
target triple = "i686-pc-mingw32"
-define void @func() personality i32 (...)* @__gxx_personality_v0 {
+define void @func() personality ptr @__gxx_personality_v0 {
bb_init:
br label %bb_main
br label %bb_main
invcont17.normaldest.normaldest: ; No predecessors!
- %exn = landingpad {i8*, i32}
- catch i8* null
- store i32 %tmp23, i32* undef
+ %exn = landingpad {ptr, i32}
+ catch ptr null
+ store i32 %tmp23, ptr undef
br label %bb_main
}
; RUN: opt < %s -passes=loop-simplify -S | FileCheck %s
; PR11575
-@catchtypeinfo = external unnamed_addr constant { i8*, i8*, i8* }
+@catchtypeinfo = external unnamed_addr constant { ptr, ptr, ptr }
-define void @main() uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @main() uwtable ssp personality ptr @__gxx_personality_v0 {
entry:
invoke void @f1()
to label %try.cont19 unwind label %catch
; CHECK: br label %catch
catch: ; preds = %if.else, %entry
- %0 = landingpad { i8*, i32 }
- catch i8* bitcast ({ i8*, i8*, i8* }* @catchtypeinfo to i8*)
+ %0 = landingpad { ptr, i32 }
+ catch ptr @catchtypeinfo
invoke void @f3()
to label %if.else unwind label %eh.resume
ret void
eh.resume: ; preds = %catch
- %1 = landingpad { i8*, i32 }
+ %1 = landingpad { ptr, i32 }
cleanup
- catch i8* bitcast ({ i8*, i8*, i8* }* @catchtypeinfo to i8*)
- resume { i8*, i32 } undef
+ catch ptr @catchtypeinfo
+ resume { ptr, i32 } undef
}
declare i32 @__gxx_personality_v0(...)
; CHECK: indirectbr {{.*}}label %while.cond112
define fastcc void @build_regex_nfa() nounwind uwtable ssp {
entry:
- indirectbr i8* blockaddress(@build_regex_nfa, %while.cond), [label %while.cond]
+ indirectbr ptr blockaddress(@build_regex_nfa, %while.cond), [label %while.cond]
while.cond: ; preds = %if.then439, %entry
- indirectbr i8* blockaddress(@build_regex_nfa, %sw.bb103), [label %do.body785, label %sw.bb103]
+ indirectbr ptr blockaddress(@build_regex_nfa, %sw.bb103), [label %do.body785, label %sw.bb103]
sw.bb103: ; preds = %while.body
- indirectbr i8* blockaddress(@build_regex_nfa, %while.cond112), [label %while.cond112]
+ indirectbr ptr blockaddress(@build_regex_nfa, %while.cond112), [label %while.cond112]
while.cond112: ; preds = %for.body, %for.cond.preheader, %sw.bb103
%pc.0 = phi i8 [ -1, %sw.bb103 ], [ 0, %for.body ], [ %pc.0, %for.cond.preheader ]
- indirectbr i8* blockaddress(@build_regex_nfa, %Lsetdone), [label %sw.bb118, label %Lsetdone]
+ indirectbr ptr blockaddress(@build_regex_nfa, %Lsetdone), [label %sw.bb118, label %Lsetdone]
sw.bb118: ; preds = %while.cond112
- indirectbr i8* blockaddress(@build_regex_nfa, %for.cond.preheader), [label %Lerror.loopexit, label %for.cond.preheader]
+ indirectbr ptr blockaddress(@build_regex_nfa, %for.cond.preheader), [label %Lerror.loopexit, label %for.cond.preheader]
for.cond.preheader: ; preds = %sw.bb118
- indirectbr i8* blockaddress(@build_regex_nfa, %for.body), [label %while.cond112, label %for.body]
+ indirectbr ptr blockaddress(@build_regex_nfa, %for.body), [label %while.cond112, label %for.body]
for.body: ; preds = %for.body, %for.cond.preheader
- indirectbr i8* blockaddress(@build_regex_nfa, %for.body), [label %while.cond112, label %for.body]
+ indirectbr ptr blockaddress(@build_regex_nfa, %for.body), [label %while.cond112, label %for.body]
Lsetdone: ; preds = %while.cond112
unreachable
for.cond: ; preds = %for.inc7, %entry
%storemerge = phi i32 [ 0, %entry ], [ %inc8, %for.inc7 ]
%f.0 = phi i32 [ undef, %entry ], [ %f.1, %for.inc7 ]
- store i32 %storemerge, i32* @d, align 4
+ store i32 %storemerge, ptr @d, align 4
%cmp = icmp slt i32 %storemerge, 1
br i1 %cmp, label %for.cond1, label %for.end9
for.cond1: ; preds = %for.cond, %for.body3
%storemerge1 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
%f.1 = phi i32 [ %xor, %for.body3 ], [ %f.0, %for.cond ]
- store i32 %storemerge1, i32* @a, align 4
+ store i32 %storemerge1, ptr @a, align 4
%cmp2 = icmp slt i32 %storemerge1, 1
br i1 %cmp2, label %for.body3, label %for.inc7
for.body3: ; preds = %for.cond1
- %0 = load i32, i32* @c, align 4
+ %0 = load i32, ptr @c, align 4
%cmp4 = icmp sge i32 %storemerge1, %0
%conv = zext i1 %cmp4 to i32
- %1 = load i32, i32* @d, align 4
+ %1 = load i32, ptr @d, align 4
%add = add nsw i32 %conv, %1
%sext = shl i32 %add, 16
%conv6 = ashr exact i32 %sext, 16
br label %for.cond1
for.inc7: ; preds = %for.cond1
- %2 = load i32, i32* @d, align 4
+ %2 = load i32, ptr @d, align 4
%inc8 = add nsw i32 %2, 1
br label %for.cond
br i1 %cmp10, label %if.then, label %if.end
if.then: ; preds = %for.end9
- store i32 0, i32* @b, align 4
+ store i32 0, ptr @b, align 4
br label %if.end
if.end: ; preds = %if.then, %for.end9
; Test a case where we have multiple exit blocks as successors of a single loop
; block that need to be made dedicated exit blocks. We also have multiple
; exiting edges to one of the exit blocks that all should be rewritten.
-define void @test_multiple_exits_from_single_block(i8 %a, i8* %b.ptr) {
+define void @test_multiple_exits_from_single_block(i8 %a, ptr %b.ptr) {
; CHECK-LABEL: @test_multiple_exits_from_single_block(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i8 [[A:%.*]], label [[LOOP_PREHEADER:%.*]] [
; CHECK: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[B:%.*]] = load volatile i8, i8* [[B_PTR:%.*]]
+; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[B_PTR:%.*]]
; CHECK-NEXT: switch i8 [[B]], label [[LOOP_BACKEDGE:%.*]] [
; CHECK-NEXT: i8 0, label [[EXIT_A_LOOPEXIT:%.*]]
; CHECK-NEXT: i8 1, label [[EXIT_B_LOOPEXIT:%.*]]
]
loop:
- %b = load volatile i8, i8* %b.ptr
+ %b = load volatile i8, ptr %b.ptr
switch i8 %b, label %loop [
i8 0, label %exit.a
i8 1, label %exit.b
; Check that we leave already dedicated exits alone when forming dedicated exit
; blocks.
-define void @test_pre_existing_dedicated_exits(i1 %a, i1* %ptr) {
+define void @test_pre_existing_dedicated_exits(i1 %a, ptr %ptr) {
; CHECK-LABEL: @test_pre_existing_dedicated_exits(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[A:%.*]], label [[LOOP_PH:%.*]], label [[NON_DEDICATED_EXIT:%.*]]
; CHECK: loop.ph:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[C1:%.*]] = load volatile i1, i1* [[PTR:%.*]]
+; CHECK-NEXT: [[C1:%.*]] = load volatile i1, ptr [[PTR:%.*]]
; CHECK-NEXT: br i1 [[C1]], label [[LOOP_BODY1:%.*]], label [[DEDICATED_EXIT1:%.*]]
; CHECK: loop.body1:
-; CHECK-NEXT: [[C2:%.*]] = load volatile i1, i1* [[PTR]]
+; CHECK-NEXT: [[C2:%.*]] = load volatile i1, ptr [[PTR]]
; CHECK-NEXT: br i1 [[C2]], label [[LOOP_BODY2:%.*]], label [[NON_DEDICATED_EXIT_LOOPEXIT:%.*]]
; CHECK: loop.body2:
-; CHECK-NEXT: [[C3:%.*]] = load volatile i1, i1* [[PTR]]
+; CHECK-NEXT: [[C3:%.*]] = load volatile i1, ptr [[PTR]]
; CHECK-NEXT: br i1 [[C3]], label [[LOOP_BACKEDGE:%.*]], label [[DEDICATED_EXIT2:%.*]]
; CHECK: loop.backedge:
; CHECK-NEXT: br label [[LOOP_HEADER]]
br label %loop.header
loop.header:
- %c1 = load volatile i1, i1* %ptr
+ %c1 = load volatile i1, ptr %ptr
br i1 %c1, label %loop.body1, label %dedicated_exit1
loop.body1:
- %c2 = load volatile i1, i1* %ptr
+ %c2 = load volatile i1, ptr %ptr
br i1 %c2, label %loop.body2, label %non_dedicated_exit
loop.body2:
- %c3 = load volatile i1, i1* %ptr
+ %c3 = load volatile i1, ptr %ptr
br i1 %c3, label %loop.backedge, label %dedicated_exit2
loop.backedge:
; Check that we form what dedicated exits we can even when some exits are
; reached via indirectbr which precludes forming dedicated exits.
-define void @test_form_some_dedicated_exits_despite_indirectbr(i8 %a, i8* %ptr, i8** %addr.ptr) {
+define void @test_form_some_dedicated_exits_despite_indirectbr(i8 %a, ptr %ptr, ptr %addr.ptr) {
; CHECK-LABEL: @test_form_some_dedicated_exits_despite_indirectbr(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i8 [[A:%.*]], label [[LOOP_PH:%.*]] [
; CHECK: loop.ph:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[ADDR1:%.*]] = load volatile i8*, i8** [[ADDR_PTR:%.*]]
-; CHECK-NEXT: indirectbr i8* [[ADDR1]], [label [[LOOP_BODY1:%.*]], label %exit.a]
+; CHECK-NEXT: [[ADDR1:%.*]] = load volatile ptr, ptr [[ADDR_PTR:%.*]]
+; CHECK-NEXT: indirectbr ptr [[ADDR1]], [label [[LOOP_BODY1:%.*]], label %exit.a]
; CHECK: loop.body1:
-; CHECK-NEXT: [[B:%.*]] = load volatile i8, i8* [[PTR:%.*]]
+; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR:%.*]]
; CHECK-NEXT: switch i8 [[B]], label [[LOOP_BODY2:%.*]] [
; CHECK-NEXT: i8 0, label [[EXIT_A]]
; CHECK-NEXT: i8 1, label [[EXIT_B_LOOPEXIT:%.*]]
; CHECK-NEXT: i8 2, label [[EXIT_C]]
; CHECK-NEXT: ]
; CHECK: loop.body2:
-; CHECK-NEXT: [[ADDR2:%.*]] = load volatile i8*, i8** [[ADDR_PTR]]
-; CHECK-NEXT: indirectbr i8* [[ADDR2]], [label [[LOOP_BACKEDGE:%.*]], label %exit.c]
+; CHECK-NEXT: [[ADDR2:%.*]] = load volatile ptr, ptr [[ADDR_PTR]]
+; CHECK-NEXT: indirectbr ptr [[ADDR2]], [label [[LOOP_BACKEDGE:%.*]], label %exit.c]
; CHECK: loop.backedge:
; CHECK-NEXT: br label [[LOOP_HEADER]]
; CHECK: exit.a:
br label %loop.header
loop.header:
- %addr1 = load volatile i8*, i8** %addr.ptr
- indirectbr i8* %addr1, [label %loop.body1, label %exit.a]
+ %addr1 = load volatile ptr, ptr %addr.ptr
+ indirectbr ptr %addr1, [label %loop.body1, label %exit.a]
loop.body1:
- %b = load volatile i8, i8* %ptr
+ %b = load volatile i8, ptr %ptr
switch i8 %b, label %loop.body2 [
i8 0, label %exit.a
i8 1, label %exit.b
]
loop.body2:
- %addr2 = load volatile i8*, i8** %addr.ptr
- indirectbr i8* %addr2, [label %loop.backedge, label %exit.c]
+ %addr2 = load volatile ptr, ptr %addr.ptr
+ indirectbr ptr %addr2, [label %loop.backedge, label %exit.c]
loop.backedge:
br label %loop.header
%union.anon = type { i32 }
%"Length" = type <{ %union.anon, i8, i8, i8, i8 }>
-declare void @bar(%"Length"*) #3
-@catchtypeinfo = external unnamed_addr constant { i8*, i8*, i8* }
+declare void @bar(ptr) #3
+@catchtypeinfo = external unnamed_addr constant { ptr, ptr, ptr }
declare i32 @__gxx_personality_v0(...)
declare void @f1()
declare void @f2()
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label %for.end, !dbg [[LOOPEXIT_LOC:![0-9]+]]
-define linkonce_odr hidden void @foo(%"Length"* %begin, %"Length"* %end) nounwind ssp uwtable align 2 !dbg !6 {
+define linkonce_odr hidden void @foo(ptr %begin, ptr %end) nounwind ssp uwtable align 2 !dbg !6 {
entry:
- %cmp.4 = icmp eq %"Length"* %begin, %end, !dbg !7
+ %cmp.4 = icmp eq ptr %begin, %end, !dbg !7
br i1 %cmp.4, label %for.end, label %for.body, !dbg !8
for.body: ; preds = %entry, %length.exit
- %begin.sink5 = phi %"Length"* [ %incdec.ptr, %length.exit ], [ %begin, %entry ]
- tail call void @llvm.dbg.value(metadata %"Length"* %begin.sink5, metadata !15, metadata !16), !dbg !17
- %m_type.i.i.i = getelementptr inbounds %"Length", %"Length"* %begin.sink5, i64 0, i32 2, !dbg !9
- %0 = load i8, i8* %m_type.i.i.i, align 1, !dbg !9
+ %begin.sink5 = phi ptr [ %incdec.ptr, %length.exit ], [ %begin, %entry ]
+ tail call void @llvm.dbg.value(metadata ptr %begin.sink5, metadata !15, metadata !16), !dbg !17
+ %m_type.i.i.i = getelementptr inbounds %"Length", ptr %begin.sink5, i64 0, i32 2, !dbg !9
+ %0 = load i8, ptr %m_type.i.i.i, align 1, !dbg !9
%cmp.i.i = icmp eq i8 %0, 9, !dbg !7
br i1 %cmp.i.i, label %if.then.i, label %length.exit, !dbg !8
if.then.i: ; preds = %for.body
- tail call void @bar(%"Length"* %begin.sink5) #7, !dbg !10
+ tail call void @bar(ptr %begin.sink5) #7, !dbg !10
br label %length.exit, !dbg !10
length.exit: ; preds = %for.body, %if.then.i
- %incdec.ptr = getelementptr inbounds %"Length", %"Length"* %begin.sink5, i64 1, !dbg !11
- %cmp = icmp eq %"Length"* %incdec.ptr, %end, !dbg !7
+ %incdec.ptr = getelementptr inbounds %"Length", ptr %begin.sink5, i64 1, !dbg !11
+ %cmp = icmp eq ptr %incdec.ptr, %end, !dbg !7
br i1 %cmp, label %for.end, label %for.body, !dbg !8
for.end: ; preds = %length.exit, %entry
; CHECK: catch.preheader.split-lp:
; CHECK: br label %catch, !dbg [[LPAD_PREHEADER_LOC]]
-define void @with_landingpad() uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @with_landingpad() uwtable ssp personality ptr @__gxx_personality_v0 {
entry:
invoke void @f1() to label %try.cont19 unwind label %catch, !dbg !13
catch: ; preds = %if.else, %entry
- %0 = landingpad { i8*, i32 }
- catch i8* bitcast ({ i8*, i8*, i8* }* @catchtypeinfo to i8*), !dbg !13
+ %0 = landingpad { ptr, i32 }
+ catch ptr @catchtypeinfo, !dbg !13
invoke void @f3() to label %if.else unwind label %eh.resume, !dbg !13
if.else: ; preds = %catch
ret void, !dbg !13
eh.resume: ; preds = %catch
- %1 = landingpad { i8*, i32 }
- cleanup catch i8* bitcast ({ i8*, i8*, i8* }* @catchtypeinfo to i8*), !dbg !13
- resume { i8*, i32 } undef, !dbg !13
+ %1 = landingpad { ptr, i32 }
+ cleanup catch ptr @catchtypeinfo, !dbg !13
+ resume { ptr, i32 } undef, !dbg !13
}
; Function Attrs: nounwind readnone
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64le-unknown-linux"
-define fastcc void @do_update_md([3 x float]* nocapture readonly %x) #0 {
+define fastcc void @do_update_md(ptr nocapture readonly %x) #0 {
entry:
br i1 undef, label %if.end365, label %lor.lhs.false134
; return ret;
; }
-define dso_local i32 @"foo"(i32 %count, i32* nocapture readonly %bar) local_unnamed_addr !dbg !8 {
+define dso_local i32 @"foo"(i32 %count, ptr nocapture readonly %bar) local_unnamed_addr !dbg !8 {
entry:
%cmp = icmp sgt i32 %count, 255, !dbg !16
br i1 %cmp, label %return, label %for.cond.preheader, !dbg !16
%j.08 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%ret.07 = phi i32 [ %count, %for.body.lr.ph ], [ %add2, %for.body ]
%0 = zext i32 %j.08 to i64, !dbg !22
- %arrayidx = getelementptr inbounds i32, i32* %bar, i64 %0, !dbg !22
- %1 = load i32, i32* %arrayidx, align 4, !dbg !22
+ %arrayidx = getelementptr inbounds i32, ptr %bar, i64 %0, !dbg !22
+ %1 = load i32, ptr %arrayidx, align 4, !dbg !22
%add2 = add nsw i32 %1, %ret.07, !dbg !27
%inc = add nuw nsw i32 %j.08, 1, !dbg !28
%cmp1 = icmp slt i32 %inc, %count, !dbg !19
; CHECK: bb5: ; preds = %bb1
; CHECK-NEXT: br label %bb1{{$}}
-define void @foo(i8* %p) nounwind {
+define void @foo(ptr %p) nounwind {
bb:
br label %bb1
bb1: ; preds = %bb5, %bb1, %bb
- indirectbr i8* %p, [label %bb6, label %bb7, label %bb1, label %bb2, label %bb3, label %bb5, label %bb4]
+ indirectbr ptr %p, [label %bb6, label %bb7, label %bb1, label %bb2, label %bb3, label %bb5, label %bb4]
bb2: ; preds = %bb1
ret void
; RUN: opt < %s -passes=loop-simplify,lcssa -verify-loop-info -verify-dom-info -S \
-; RUN: | grep -F "indirectbr i8* %x, [label %L0, label %L1]" \
+; RUN: | grep -F "indirectbr ptr %x, [label %L0, label %L1]" \
; RUN: | count 6
; LoopSimplify should not try to transform loops when indirectbr is involved.
-define void @entry(i8* %x) {
+define void @entry(ptr %x) {
entry:
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
L0:
br label %L0
ret void
}
-define void @backedge(i8* %x) {
+define void @backedge(ptr %x) {
entry:
br label %L0
br label %L1
L1:
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
}
-define i64 @exit(i8* %x) {
+define i64 @exit(ptr %x) {
entry:
br label %L2
L2:
%z = bitcast i64 0 to i64
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
L0:
br label %L2
ret i64 %z
}
-define i64 @criticalexit(i8* %x, i1 %a) {
+define i64 @criticalexit(ptr %x, i1 %a) {
entry:
br i1 %a, label %L1, label %L2
L2:
%z = bitcast i64 0 to i64
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
L0:
br label %L2
ret i64 %y
}
-define i64 @exit_backedge(i8* %x) {
+define i64 @exit_backedge(ptr %x) {
entry:
br label %L0
L0:
%z = bitcast i64 0 to i64
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
L1:
ret i64 %z
}
-define i64 @criticalexit_backedge(i8* %x, i1 %a) {
+define i64 @criticalexit_backedge(ptr %x, i1 %a) {
entry:
br i1 %a, label %L0, label %L1
L0:
%z = bitcast i64 0 to i64
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
L1:
%y = phi i64 [ %z, %L0 ], [ 1, %entry ]
br i1 undef, label %while.body, label %while.end
while.body:
- indirectbr i8* undef, [label %end_opcode, label %end_opcode]
+ indirectbr ptr undef, [label %end_opcode, label %end_opcode]
end_opcode:
br i1 false, label %end_opcode, label %while.cond
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n32:64"
-define float @test1(float* %pTmp1, float* %peakWeight, i32 %bandEdgeIndex) nounwind {
+define float @test1(ptr %pTmp1, ptr %peakWeight, i32 %bandEdgeIndex) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load float, float* [[PEAKWEIGHT:%.*]], align 4
+; CHECK-NEXT: [[T0:%.*]] = load float, ptr [[PEAKWEIGHT:%.*]], align 4
; CHECK-NEXT: [[T11:%.*]] = add i32 [[BANDEDGEINDEX:%.*]], -1
; CHECK-NEXT: [[T121:%.*]] = icmp sgt i32 [[T11]], 0
; CHECK-NEXT: br i1 [[T121]], label [[BB_LR_PH:%.*]], label [[BB3:%.*]]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[BB_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[BB]] ]
; CHECK-NEXT: [[DISTERBHI_04:%.*]] = phi float [ 0.000000e+00, [[BB_LR_PH]] ], [ [[T4:%.*]], [[BB]] ]
; CHECK-NEXT: [[PEAKCOUNT_02:%.*]] = phi float [ [[T0]], [[BB_LR_PH]] ], [ [[T9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[T2:%.*]] = getelementptr float, float* [[PTMP1:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[T3:%.*]] = load float, float* [[T2]], align 4
+; CHECK-NEXT: [[T2:%.*]] = getelementptr float, ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[T3:%.*]] = load float, ptr [[T2]], align 4
; CHECK-NEXT: [[T4]] = fadd float [[T3]], [[DISTERBHI_04]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[T7:%.*]] = getelementptr float, float* [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[T8:%.*]] = load float, float* [[T7]], align 4
+; CHECK-NEXT: [[T7:%.*]] = getelementptr float, ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[T8:%.*]] = load float, ptr [[T7]], align 4
; CHECK-NEXT: [[T9]] = fadd float [[T8]], [[PEAKCOUNT_02]]
; CHECK-NEXT: [[T10:%.*]] = fcmp olt float [[T4]], 2.500000e+00
; CHECK-NEXT: [[T12:%.*]] = icmp sgt i64 [[TMP0]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT: ret float [[T13]]
;
entry:
- %t0 = load float, float* %peakWeight, align 4
+ %t0 = load float, ptr %peakWeight, align 4
br label %bb1
bb: ; preds = %bb2
%t1 = sext i32 %hiPart.0 to i64
- %t2 = getelementptr float, float* %pTmp1, i64 %t1
- %t3 = load float, float* %t2, align 4
+ %t2 = getelementptr float, ptr %pTmp1, i64 %t1
+ %t3 = load float, ptr %t2, align 4
%t4 = fadd float %t3, %distERBhi.0
%t5 = add i32 %hiPart.0, 1
%t6 = sext i32 %t5 to i64
- %t7 = getelementptr float, float* %peakWeight, i64 %t6
- %t8 = load float, float* %t7, align 4
+ %t7 = getelementptr float, ptr %peakWeight, i64 %t6
+ %t8 = load float, ptr %t7, align 4
%t9 = fadd float %t8, %peakCount.0
br label %bb1
; Same test as above.
; This would crash because we assumed TTI was available to process the metadata.
-define float @merge_branches_profile_metadata(float* %pTmp1, float* %peakWeight, i32 %bandEdgeIndex) nounwind {
+define float @merge_branches_profile_metadata(ptr %pTmp1, ptr %peakWeight, i32 %bandEdgeIndex) nounwind {
; CHECK-LABEL: @merge_branches_profile_metadata(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[T0:%.*]] = load float, float* [[PEAKWEIGHT:%.*]], align 4
+; CHECK-NEXT: [[T0:%.*]] = load float, ptr [[PEAKWEIGHT:%.*]], align 4
; CHECK-NEXT: [[T11:%.*]] = add i32 [[BANDEDGEINDEX:%.*]], -1
; CHECK-NEXT: [[T121:%.*]] = icmp sgt i32 [[T11]], 0
; CHECK-NEXT: br i1 [[T121]], label [[BB_LR_PH:%.*]], label [[BB3:%.*]], !prof !0
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[BB_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[BB]] ]
; CHECK-NEXT: [[DISTERBHI_04:%.*]] = phi float [ 0.000000e+00, [[BB_LR_PH]] ], [ [[T4:%.*]], [[BB]] ]
; CHECK-NEXT: [[PEAKCOUNT_02:%.*]] = phi float [ [[T0]], [[BB_LR_PH]] ], [ [[T9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[T2:%.*]] = getelementptr float, float* [[PTMP1:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[T3:%.*]] = load float, float* [[T2]], align 4
+; CHECK-NEXT: [[T2:%.*]] = getelementptr float, ptr [[PTMP1:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[T3:%.*]] = load float, ptr [[T2]], align 4
; CHECK-NEXT: [[T4]] = fadd float [[T3]], [[DISTERBHI_04]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[T7:%.*]] = getelementptr float, float* [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
-; CHECK-NEXT: [[T8:%.*]] = load float, float* [[T7]], align 4
+; CHECK-NEXT: [[T7:%.*]] = getelementptr float, ptr [[PEAKWEIGHT]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT: [[T8:%.*]] = load float, ptr [[T7]], align 4
; CHECK-NEXT: [[T9]] = fadd float [[T8]], [[PEAKCOUNT_02]]
; CHECK-NEXT: [[T10:%.*]] = fcmp olt float [[T4]], 2.500000e+00
; CHECK-NEXT: [[T12:%.*]] = icmp sgt i64 [[TMP0]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT: ret float [[T13]]
;
entry:
- %t0 = load float, float* %peakWeight, align 4
+ %t0 = load float, ptr %peakWeight, align 4
br label %bb1
bb: ; preds = %bb2
%t1 = sext i32 %hiPart.0 to i64
- %t2 = getelementptr float, float* %pTmp1, i64 %t1
- %t3 = load float, float* %t2, align 4
+ %t2 = getelementptr float, ptr %pTmp1, i64 %t1
+ %t3 = load float, ptr %t2, align 4
%t4 = fadd float %t3, %distERBhi.0
%t5 = add i32 %hiPart.0, 1
%t6 = sext i32 %t5 to i64
- %t7 = getelementptr float, float* %peakWeight, i64 %t6
- %t8 = load float, float* %t7, align 4
+ %t7 = getelementptr float, ptr %peakWeight, i64 %t6
+ %t8 = load float, ptr %t7, align 4
%t9 = fadd float %t8, %peakCount.0
br label %bb1
br i1 undef, label %for.cond142.preheader.us, label %for.end174.us
for.body145.us:
- %arrayidx163.us = getelementptr inbounds %struct.Params, %struct.Params* undef, i64 0, i32 0, i64 %idxprom130, i64 %idxprom146.us
+ %arrayidx163.us = getelementptr inbounds %struct.Params, ptr undef, i64 0, i32 0, i64 %idxprom130, i64 %idxprom146.us
br i1 undef, label %for.body145.us, label %for.inc172.us
for.cond142.preheader.us:
; Loop Simplify should turn phi nodes like X = phi [X, Y] into just Y, eliminating them.
; RUN: opt < %s -passes=loop-simplify -S | grep phi | count 6
-@A = weak global [3000000 x i32] zeroinitializer ; <[3000000 x i32]*> [#uses=1]
-@B = weak global [20000 x i32] zeroinitializer ; <[20000 x i32]*> [#uses=1]
-@C = weak global [100 x i32] zeroinitializer ; <[100 x i32]*> [#uses=1]
-@Z = weak global i32 0 ; <i32*> [#uses=2]
+@A = weak global [3000000 x i32] zeroinitializer ; <ptr> [#uses=1]
+@B = weak global [20000 x i32] zeroinitializer ; <ptr> [#uses=1]
+@C = weak global [100 x i32] zeroinitializer ; <ptr> [#uses=1]
+@Z = weak global i32 0 ; <ptr> [#uses=2]
define i32 @main() {
entry:
br label %loopentry.1
loopentry.1: ; preds = %loopexit.1, %entry
%indvar20 = phi i32 [ 0, %entry ], [ %indvar.next21, %loopexit.1 ] ; <i32> [#uses=1]
- %a.1 = phi i32* [ getelementptr ([3000000 x i32], [3000000 x i32]* @A, i32 0, i32 0), %entry ], [ %inc.0, %loopexit.1 ] ; <i32*> [#uses=1]
+ %a.1 = phi ptr [ @A, %entry ], [ %inc.0, %loopexit.1 ] ; <ptr> [#uses=1]
br label %no_exit.2
no_exit.2: ; preds = %loopexit.2, %no_exit.2, %loopentry.1
- %a.0.4.ph = phi i32* [ %a.1, %loopentry.1 ], [ %inc.0, %loopexit.2 ], [ %a.0.4.ph, %no_exit.2 ] ; <i32*> [#uses=3]
- %b.1.4.ph = phi i32* [ getelementptr ([20000 x i32], [20000 x i32]* @B, i32 0, i32 0), %loopentry.1 ], [ %inc.1, %loopexit.2 ], [ %b.1.4.ph, %no_exit.2 ] ; <i32*> [#uses=3]
+ %a.0.4.ph = phi ptr [ %a.1, %loopentry.1 ], [ %inc.0, %loopexit.2 ], [ %a.0.4.ph, %no_exit.2 ] ; <ptr> [#uses=3]
+ %b.1.4.ph = phi ptr [ @B, %loopentry.1 ], [ %inc.1, %loopexit.2 ], [ %b.1.4.ph, %no_exit.2 ] ; <ptr> [#uses=3]
%indvar17 = phi i32 [ 0, %loopentry.1 ], [ %indvar.next18, %loopexit.2 ], [ %indvar17, %no_exit.2 ] ; <i32> [#uses=2]
%indvar = phi i32 [ %indvar.next, %no_exit.2 ], [ 0, %loopexit.2 ], [ 0, %loopentry.1 ] ; <i32> [#uses=5]
%b.1.4.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %c.2.4 = getelementptr [100 x i32], [100 x i32]* @C, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %c.2.4 = getelementptr [100 x i32], ptr @C, i32 0, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
%gep.upgrd.2 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %a.0.4 = getelementptr i32, i32* %a.0.4.ph, i64 %gep.upgrd.2 ; <i32*> [#uses=1]
+ %a.0.4 = getelementptr i32, ptr %a.0.4.ph, i64 %gep.upgrd.2 ; <ptr> [#uses=1]
%gep.upgrd.3 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %b.1.4 = getelementptr i32, i32* %b.1.4.ph, i64 %gep.upgrd.3 ; <i32*> [#uses=1]
+ %b.1.4 = getelementptr i32, ptr %b.1.4.ph, i64 %gep.upgrd.3 ; <ptr> [#uses=1]
%inc.0.rec = add i32 %b.1.4.rec, 1 ; <i32> [#uses=2]
- %inc.0 = getelementptr i32, i32* %a.0.4.ph, i32 %inc.0.rec ; <i32*> [#uses=2]
- %tmp.13 = load i32, i32* %a.0.4 ; <i32> [#uses=1]
- %inc.1 = getelementptr i32, i32* %b.1.4.ph, i32 %inc.0.rec ; <i32*> [#uses=1]
- %tmp.15 = load i32, i32* %b.1.4 ; <i32> [#uses=1]
- %tmp.18 = load i32, i32* %c.2.4 ; <i32> [#uses=1]
+ %inc.0 = getelementptr i32, ptr %a.0.4.ph, i32 %inc.0.rec ; <ptr> [#uses=2]
+ %tmp.13 = load i32, ptr %a.0.4 ; <i32> [#uses=1]
+ %inc.1 = getelementptr i32, ptr %b.1.4.ph, i32 %inc.0.rec ; <ptr> [#uses=1]
+ %tmp.15 = load i32, ptr %b.1.4 ; <i32> [#uses=1]
+ %tmp.18 = load i32, ptr %c.2.4 ; <i32> [#uses=1]
%tmp.16 = mul i32 %tmp.15, %tmp.13 ; <i32> [#uses=1]
%tmp.19 = mul i32 %tmp.16, %tmp.18 ; <i32> [#uses=1]
- %tmp.20 = load i32, i32* @Z ; <i32> [#uses=1]
+ %tmp.20 = load i32, ptr @Z ; <i32> [#uses=1]
%tmp.21 = add i32 %tmp.19, %tmp.20 ; <i32> [#uses=1]
- store i32 %tmp.21, i32* @Z
+ store i32 %tmp.21, ptr @Z
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 100 ; <i1> [#uses=1]
br i1 %exitcond, label %loopexit.2, label %no_exit.2
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.cond ]
%cmp1 = icmp eq i32 %x, 0
%iv.next = add nuw nsw i32 %iv, 1
- %a = load i32, i32* @a
+ %a = load i32, ptr @a
br i1 %cmp1, label %for.cond, label %for.end
for.cond:
br i1 undef, label %bb3, label %bb1
bb3:
- %b = load i32*, i32** undef
+ %b = load ptr, ptr undef
br i1 undef, label %bb2, label %bb4
bb4:
br i1 undef, label %bb_end, label %bb1
bb_end:
- %x = getelementptr i32, i32* %b
+ %x = getelementptr i32, ptr %b
br label %bb_end
}
; CHECK: bb2:
bb2: ; preds = %bb2.loopexit, %bb2, %bb1
%i = phi i32 [ 0, %bb1 ], [ %i, %bb2 ], [ %i.ph, %bb2.loopexit ]
- %x = load i32, i32* undef, align 8
+ %x = load i32, ptr undef, align 8
br i1 undef, label %bb2, label %bb3.preheader
; CHECK: bb3.preheader:
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @test_01(i32* nocapture %a) local_unnamed_addr {
+define void @test_01(ptr nocapture %a) local_unnamed_addr {
; CHECK-LABEL: @test_01(
entry:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 96
- %arrayidx.promoted51 = load i32, i32* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 96
+ %arrayidx.promoted51 = load i32, ptr %arrayidx, align 1
br label %while.body
while.body: ; preds = %entry, %while.end29
while.cond1.while.end29_crit_edge: ; preds = %while.end28
%.lcssa = phi i32 [ %mul17.lcssa, %while.end28 ]
%add.lcssa50.lcssa = phi i32 [ %add.lcssa, %while.end28 ]
- store i32 %add.lcssa50.lcssa, i32* %a, align 4
+ store i32 %add.lcssa50.lcssa, ptr %a, align 4
br label %while.end29
while.end29: ; preds = %while.cond1.while.end29_crit_edge, %while.body
%0 = phi i32 [ 0, %entry ], [ %add, %if.end ], [ %add, %if.then5 ]
%add = add i32 %0, 1
%cmp = icmp slt i32 %0, 1
- %tmp1 = load i32, i32* @maxStat, align 4
+ %tmp1 = load i32, ptr @maxStat, align 4
br i1 %cmp, label %for.body, label %for.cond14.preheader
for.cond14.preheader: ; preds = %for.cond
%i13.027 = phi i32 [ %1, %for.body18 ], [ 0, %for.cond14.preheader ]
call void @foo() nounwind
%1 = add nsw i32 %i13.027, 1
- %tmp16 = load i32, i32* @maxStat, align 4
+ %tmp16 = load i32, ptr @maxStat, align 4
%cmp17 = icmp slt i32 %1, %tmp16
br i1 %cmp17, label %for.body18, label %return
br label %Loop, !dbg !6
Loop: ; preds = %BE2, %BE1, %0
%IV = phi i32 [ 1, %0 ], [ %IV2, %BE1 ], [ %IV2, %BE2 ] ; <i32> [#uses=2]
- store i32 %IV, i32* null, !dbg !7
+ store i32 %IV, ptr null, !dbg !7
%IV2 = add i32 %IV, 2, !dbg !8 ; <i32> [#uses=2]
br i1 %C, label %BE1, label %BE2, !dbg !9
BE1: ; preds = %Loop
; When loopsimplify generates dedicated exit block for blocks that are landing
; pads (i.e. innerLoopExit in this test), we should not get confused with the
; unreachable pred (unreachableB) to innerLoopExit.
-define void @baz(i32 %trip) personality i32* ()* @wobble {
+define void @baz(i32 %trip) personality ptr @wobble {
entry:
br label %outerHeader
br label %innerH
innerH:
- %tmp50 = invoke i8 * undef()
+ %tmp50 = invoke ptr undef()
to label %innerLatch unwind label %innerLoopExit
innerLatch:
br i1 %cmp, label %innerH, label %retblock
unreachableB: ; No predecessors!
- %tmp62 = invoke i8 * undef()
+ %tmp62 = invoke ptr undef()
to label %retblock unwind label %innerLoopExit
; undedicated exit block (preds from inner and outer loop)
; Also has unreachableB as pred.
innerLoopExit:
- %tmp65 = landingpad { i8*, i32 }
+ %tmp65 = landingpad { ptr, i32 }
cleanup
invoke void @foo()
to label %outerHeader unwind label %unwindblock
unwindblock:
- %tmp67 = landingpad { i8*, i32 }
+ %tmp67 = landingpad { ptr, i32 }
cleanup
ret void
}
; Function Attrs: nounwind
-declare i32* @wobble()
+declare ptr @wobble()
; Function Attrs: uwtable
declare void @foo()
; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -passes=loop-simplifycfg %s | FileCheck %s
; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -passes='require<domtree>,loop(loop-simplifycfg)' %s | FileCheck %s
-declare i32* @fake_personality_function()
+declare ptr @fake_personality_function()
declare void @foo()
-define i32 @test_remove_lpad(i1 %exitcond) personality i32* ()* @fake_personality_function {
+define i32 @test_remove_lpad(i1 %exitcond) personality ptr @fake_personality_function {
; CHECK-LABEL: @test_remove_lpad(
entry:
br label %for.body
unreachable
}
-define i32 @test_remove_phi_lpad(i1 %exitcond) personality i32* ()* @fake_personality_function {
+define i32 @test_remove_phi_lpad(i1 %exitcond) personality ptr @fake_personality_function {
; CHECK-LABEL: @test_remove_phi_lpad(
entry:
br label %for.body
ret i32 %p
}
-define i32 @test_split_remove_phi_lpad_(i1 %exitcond) personality i32* ()* @fake_personality_function {
+define i32 @test_split_remove_phi_lpad_(i1 %exitcond) personality ptr @fake_personality_function {
; CHECK-LABEL: @test_split_remove_phi_lpad_(
entry:
invoke void @foo() to label %for.body unwind label %unwind-bb
; CHECK-NEXT: phi
; CHECK-NOT: br label
; CHECK: br i1
-define i32 @foo(i32* %P, i64* %Q) {
+define i32 @foo(ptr %P, ptr %Q) {
entry:
br label %outer
br label %inner
inner: ; preds = %outer
- store i32 0, i32* %P
- store i32 1, i32* %P
- store i32 2, i32* %P
+ store i32 0, ptr %P
+ store i32 1, ptr %P
+ store i32 2, ptr %P
%y.inc2 = add nsw i32 %y.2, 1
%exitcond.outer = icmp eq i32 %y.inc2, 3
- store i32 %y.2, i32* %P
+ store i32 %y.2, ptr %P
br i1 %exitcond.outer, label %exit, label %outer.latch2
outer.latch2: ; preds = %inner
%t = sext i32 %y.inc2 to i64
- store i64 %t, i64* %Q
+ store i64 %t, ptr %Q
br label %outer
exit: ; preds = %inner
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; Make sure we update MSSA properly.
-define void @test(i32* %a, i32* %b) {
+define void @test(ptr %a, ptr %b) {
; CHECK-LABEL: @test(
entry:
unreachable
latch:
- store i32 %i, i32* %a
- store i32 %i, i32* %b
+ store i32 %i, ptr %a
+ store i32 %i, ptr %b
%i.inc = add nsw i32 %i, 1
%exitcond = icmp eq i32 %i.inc, 4
br i1 %exitcond, label %exit, label %for.body
; CHECK: for.cond:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @a, align 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @a, align 1
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i16 [[TMP0]], 0
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
br i1 %cond, label %if.then, label %for.inc
if.then:
- %0 = load i16, i16* @a, align 1
+ %0 = load i16, ptr @a, align 1
%tobool = icmp ne i16 %0, 0
br i1 %tobool, label %for.inc, label %for.inc
define internal fastcc void @test_01() unnamed_addr {
bb:
- %tmp = load i32, i32* @global.2, align 4
+ %tmp = load i32, ptr @global.2, align 4
%tmp1 = icmp eq i32 %tmp, 0
br i1 %tmp1, label %bb3, label %bb2
br i1 true, label %bb5, label %bb6
bb5: ; preds = %bb4
- store i16 0, i16* @global.3, align 2
+ store i16 0, ptr @global.3, align 2
br label %bb6
bb6: ; preds = %bb5, %bb4
br i1 %tmp12, label %bb13, label %bb14
bb13: ; preds = %bb11
- store i32 0, i32* @global.1, align 4
+ store i32 0, ptr @global.1, align 4
br label %bb11
bb14: ; preds = %bb11
bb1: ; preds = %bb16, %bb
%tmp = phi i8 [ %arg, %bb ], [ %tmp17, %bb16 ]
- %tmp2 = load i16, i16* @global.5, align 2
+ %tmp2 = load i16, ptr @global.5, align 2
%tmp3 = icmp ugt i16 %tmp2, 56
br i1 %tmp3, label %bb4, label %bb18
loopentry.4: ; preds = %loopentry.4, %loopexit.3
%indvar340 = phi i32 [ 0, %loopexit.3 ], [ %indvar.next341, %loopentry.4 ] ; <i32> [#uses=2]
%tmp. = add i32 %indvar340, %indvar342 ; <i32> [#uses=1]
- %tmp.526 = load i32*, i32** null ; <i32*> [#uses=1]
+ %tmp.526 = load ptr, ptr null ; <ptr> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp. to i64 ; <i64> [#uses=1]
- %tmp.528 = getelementptr i32, i32* %tmp.526, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp.528
+ %tmp.528 = getelementptr i32, ptr %tmp.526, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp.528
%indvar.next341 = add i32 %indvar340, 1 ; <i32> [#uses=1]
br label %loopentry.4
}
br label %loopentry.1
loopentry.1: ; preds = %loopentry.1, %loopentry.1.outer
%i.3 = phi i32 [ 0, %loopentry.1.outer ], [ %i.3.be, %loopentry.1 ] ; <i32> [#uses=2]
- %tmp.390 = load i32, i32* null ; <i32> [#uses=1]
+ %tmp.390 = load i32, ptr null ; <i32> [#uses=1]
%tmp.392 = mul i32 %tmp.390, %j.2.1.ph ; <i32> [#uses=1]
%tmp.394 = add i32 %tmp.392, %i.3 ; <i32> [#uses=1]
%i.3.be = add i32 %i.3, 1 ; <i32> [#uses=1]
target triple = "i686-apple-darwin9"
-define i8* @foo( i8* %ABC) {
+define ptr @foo( ptr %ABC) {
entry:
switch i8 0, label %bb129 [
i8 0, label %UnifiedReturnBlock
cond_next102: ; preds = %bb16
%tmp138145.rec = add i32 %ABC.2146.0.rec, 3 ; <i32> [#uses=1]
- %tmp138145 = getelementptr i8, i8* %ABC, i32 %tmp138145.rec ; <i8*> [#uses=4]
+ %tmp138145 = getelementptr i8, ptr %ABC, i32 %tmp138145.rec ; <ptr> [#uses=4]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
switch i8 0, label %bb129.loopexit [
i8 0, label %UnifiedReturnBlock.loopexit
br label %bb129
bb129: ; preds = %bb129.loopexit, %entry
- ret i8* null
+ ret ptr null
UnifiedReturnBlock.loopexit: ; preds = %cond_next102, %cond_next102, %cond_next102, %cond_next102, %bb16
- %UnifiedRetVal.ph = phi i8* [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ null, %bb16 ] ; <i8*> [#uses=0]
+ %UnifiedRetVal.ph = phi ptr [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ %tmp138145, %cond_next102 ], [ null, %bb16 ] ; <ptr> [#uses=0]
br label %UnifiedReturnBlock
UnifiedReturnBlock: ; preds = %UnifiedReturnBlock.loopexit, %entry, %entry, %entry, %entry
- ret i8* null
+ ret ptr null
}
-define i8* @bar() {
+define ptr @bar() {
entry:
switch i8 0, label %bb158 [
i8 37, label %bb74
]
bb11: ; preds = %entry
- ret i8* null
+ ret ptr null
cond_true: ; preds = %entry
- ret i8* null
+ ret ptr null
bb74: ; preds = %entry
- ret i8* null
+ ret ptr null
bb158: ; preds = %entry
- ret i8* null
+ ret ptr null
}
; Provide legal integer types.
target datalayout = "n8:16:32:64"
-@g_3 = common global i16 0 ; <i16*> [#uses=2]
-@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+@g_3 = common global i16 0 ; <ptr> [#uses=2]
+@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <ptr> [#uses=1]
define void @func_1() nounwind {
entry:
bb: ; preds = %bb, %entry
%l_2.0.reg2mem.0 = phi i16 [ 0, %entry ], [ %t1, %bb ] ; <i16> [#uses=2]
%t0 = shl i16 %l_2.0.reg2mem.0, 1 ; <i16>:0 [#uses=1]
- store volatile i16 %t0, i16* @g_3, align 2
+ store volatile i16 %t0, ptr @g_3, align 2
%t1 = add i16 %l_2.0.reg2mem.0, -3 ; <i16>:1 [#uses=2]
%t2 = icmp slt i16 %t1, 1 ; <i1>:2 [#uses=1]
br i1 %t2, label %bb, label %return
define i32 @main() nounwind {
entry:
tail call void @func_1( ) nounwind
- load volatile i16, i16* @g_3, align 2 ; <i16>:0 [#uses=1]
+ load volatile i16, ptr @g_3, align 2 ; <i16>:0 [#uses=1]
zext i16 %0 to i32 ; <i32>:1 [#uses=1]
- tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), i32 %1 ) nounwind ; <i32>:2 [#uses=0]
+ tail call i32 (ptr, ...) @printf( ptr @"\01LC", i32 %1 ) nounwind ; <i32>:2 [#uses=0]
ret i32 0
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
; Provide legal integer types.
target datalayout = "n8:16:32:64"
-@g_19 = common global i32 0 ; <i32*> [#uses=3]
-@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+@g_19 = common global i32 0 ; <ptr> [#uses=3]
+@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <ptr> [#uses=1]
define i32 @func_8(i8 zeroext %p_9) nounwind {
entry:
%indvar = phi i16 [ 0, %entry ], [ %indvar.next, %bb ] ; <i16> [#uses=2]
%tmp = sub i16 0, %indvar ; <i16> [#uses=1]
%tmp27 = trunc i16 %tmp to i8 ; <i8> [#uses=1]
- load i32, i32* @g_19, align 4 ; <i32>:0 [#uses=2]
+ load i32, ptr @g_19, align 4 ; <i32>:0 [#uses=2]
add i32 %0, 1 ; <i32>:1 [#uses=1]
- store i32 %1, i32* @g_19, align 4
+ store i32 %1, ptr @g_19, align 4
trunc i32 %0 to i8 ; <i8>:2 [#uses=1]
tail call i32 @func_8( i8 zeroext %2 ) nounwind ; <i32>:3 [#uses=0]
shl i8 %tmp27, 2 ; <i8>:4 [#uses=1]
define i32 @main() nounwind {
entry:
tail call void @func_1( ) nounwind
- load i32, i32* @g_19, align 4 ; <i32>:0 [#uses=1]
- tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), i32 %0 ) nounwind ; <i32>:1 [#uses=0]
+ load i32, ptr @g_19, align 4 ; <i32>:0 [#uses=1]
+ tail call i32 (ptr, ...) @printf( ptr @"\01LC", i32 %0 ) nounwind ; <i32>:1 [#uses=0]
ret i32 0
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
; IV of stride %3.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.5"
- %struct.anon = type { %struct.obj*, %struct.obj* }
+ %struct.anon = type { ptr, ptr }
%struct.obj = type { i16, i16, { %struct.anon } }
-@heap_size = external global i32 ; <i32*> [#uses=1]
-@"\01LC85" = external constant [39 x i8] ; <[39 x i8]*> [#uses=1]
+@heap_size = external global i32 ; <ptr> [#uses=1]
+@"\01LC85" = external constant [39 x i8] ; <ptr> [#uses=1]
-declare i32 @sprintf(i8*, i8*, ...) nounwind
+declare i32 @sprintf(ptr, ptr, ...) nounwind
-define %struct.obj* @gc_status(%struct.obj* %args) nounwind {
+define ptr @gc_status(ptr %args) nounwind {
entry:
br label %bb1.i
br i1 %0, label %bb2.i3, label %nactive_heaps.exit
bb2.i3: ; preds = %bb1.i
- %1 = load %struct.obj*, %struct.obj** null, align 4 ; <%struct.obj*> [#uses=1]
- %2 = icmp eq %struct.obj* %1, null ; <i1> [#uses=1]
+ %1 = load ptr, ptr null, align 4 ; <ptr> [#uses=1]
+ %2 = icmp eq ptr %1, null ; <i1> [#uses=1]
br i1 %2, label %nactive_heaps.exit, label %bb.i2
nactive_heaps.exit: ; preds = %bb2.i3, %bb1.i
- %3 = load i32, i32* @heap_size, align 4 ; <i32> [#uses=1]
+ %3 = load i32, ptr @heap_size, align 4 ; <i32> [#uses=1]
%4 = mul i32 %3, %m.0.i ; <i32> [#uses=1]
%5 = sub i32 %4, 0 ; <i32> [#uses=1]
- %6 = tail call i32 (i8*, i8*, ...) @sprintf(i8* null, i8* getelementptr ([39 x i8], [39 x i8]* @"\01LC85", i32 0, i32 0), i32 %m.0.i, i32 0, i32 %5, i32 0) nounwind ; <i32> [#uses=0]
- ret %struct.obj* null
+ %6 = tail call i32 (ptr, ptr, ...) @sprintf(ptr null, ptr @"\01LC85", i32 %m.0.i, i32 0, i32 %5, i32 0) nounwind ; <i32> [#uses=0]
+ ret ptr null
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.6"
-@table = common global [32 x [256 x i32]] zeroinitializer, align 32 ; <[32 x [256 x i32]]*> [#uses=2]
+@table = common global [32 x [256 x i32]] zeroinitializer, align 32 ; <ptr> [#uses=2]
define i32 @main() nounwind {
bb4.thread:
%0 = trunc i32 %j.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = sext i8 %0 to i32 ; <i32> [#uses=1]
%2 = mul i32 %1, %i.0.reg2mem.0.ph ; <i32> [#uses=1]
- %3 = getelementptr [32 x [256 x i32]], [32 x [256 x i32]]* @table, i32 0, i32 %i.0.reg2mem.0.ph, i32 %j.0.reg2mem.0 ; <i32*> [#uses=1]
- store i32 %2, i32* %3, align 4
+ %3 = getelementptr [32 x [256 x i32]], ptr @table, i32 0, i32 %i.0.reg2mem.0.ph, i32 %j.0.reg2mem.0 ; <ptr> [#uses=1]
+ store i32 %2, ptr %3, align 4
%indvar.next = add i32 %j.0.reg2mem.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 256 ; <i1> [#uses=1]
br i1 %exitcond, label %bb4, label %bb2
br i1 %exitcond10, label %bb5, label %bb2
bb5: ; preds = %bb4
- %4 = load i32, i32* getelementptr ([32 x [256 x i32]], [32 x [256 x i32]]* @table, i32 0, i32 9, i32 132), align 16 ; <i32> [#uses=1]
+ %4 = load i32, ptr getelementptr ([32 x [256 x i32]], ptr @table, i32 0, i32 9, i32 132), align 16 ; <i32> [#uses=1]
%5 = icmp eq i32 %4, -1116 ; <i1> [#uses=1]
br i1 %5, label %bb7, label %bb6
; PR10386
declare i1 @foo()
-declare i8* @bar(i8*,i8*,i8*,i8*)
+declare ptr @bar(ptr,ptr,ptr,ptr)
-define void @f(i64* %a,i64* %b,i64* %c,i64* %d,i64* %e,i64* %f,i64* %g) nounwind uwtable {
+define void @f(ptr %a,ptr %b,ptr %c,ptr %d,ptr %e,ptr %f,ptr %g) nounwind uwtable {
entry:
br label %b_throw.preheader
while.cond.i: ; preds = %while.body.i15795, %if.then.i15791
%phi = phi i64 [ %tmp20916, %while.body.i15795 ], [ 0, %H_MPZ_LBL ]
%tmp20916 = add i64 %phi, 1
- %incdec.ptr.i15793 = getelementptr i64, i64* %pc.0.lcssa.i1610719352, i64 %tmp20916
+ %incdec.ptr.i15793 = getelementptr i64, ptr %pc.0.lcssa.i1610719352, i64 %tmp20916
%boo2 = call i1 @foo()
br i1 %boo2, label %indirectgoto, label %while.body.i15795
while.body.i15795: ; preds = %while.cond.i
- %tmp20.i = load i64, i64* %incdec.ptr.i15793, align 8
+ %tmp20.i = load i64, ptr %incdec.ptr.i15793, align 8
%boo1 = call i1 @foo()
br i1 %boo1, label %while.cond.i, label %body_failed
br label %indirectgoto
body_failed:
- %pc.0.lcssa.i1610719364 = phi i64* [ %pc.0.lcssa.i1610719352, %indirectgoto ], [ %pc.0.lcssa.i1610719352, %H_MPZ_LBL ], [ %b, %H_CONST_LBL ], [ %pc.0.lcssa.i1610719352, %while.body.i15795 ]
+ %pc.0.lcssa.i1610719364 = phi ptr [ %pc.0.lcssa.i1610719352, %indirectgoto ], [ %pc.0.lcssa.i1610719352, %H_MPZ_LBL ], [ %b, %H_CONST_LBL ], [ %pc.0.lcssa.i1610719352, %while.body.i15795 ]
call i1 @foo()
br label %b_throw.preheader
indirectgoto:
- %pc.0.lcssa.i1610719352 = phi i64* [ %pc.0.lcssa.i1610719352, %D_BREAK_LBL ], [ %a, %b_throw.preheader ], [ %d, %while.cond.i ]
- %p = call i8* @bar(i8* blockaddress(@f, %D_BREAK_LBL), i8* blockaddress(@f, %H_CONST_LBL), i8* blockaddress(@f, %H_MPZ_LBL), i8* blockaddress(@f, %body_failed) )
- indirectbr i8* %p, [label %D_BREAK_LBL, label %H_CONST_LBL, label %H_MPZ_LBL, label %body_failed]
+ %pc.0.lcssa.i1610719352 = phi ptr [ %pc.0.lcssa.i1610719352, %D_BREAK_LBL ], [ %a, %b_throw.preheader ], [ %d, %while.cond.i ]
+ %p = call ptr @bar(ptr blockaddress(@f, %D_BREAK_LBL), ptr blockaddress(@f, %H_CONST_LBL), ptr blockaddress(@f, %H_MPZ_LBL), ptr blockaddress(@f, %body_failed) )
+ indirectbr ptr %p, [label %D_BREAK_LBL, label %H_CONST_LBL, label %H_MPZ_LBL, label %body_failed]
}
-; CHECK: %p = call i8* @bar(i8* blockaddress(@f, %D_BREAK_LBL), i8* blockaddress(@f, %H_CONST_LBL), i8* blockaddress(@f, %H_MPZ_LBL), i8* blockaddress(@f, %body_failed))
-; CHECK: indirectbr i8* %p, [label %D_BREAK_LBL, label %H_CONST_LBL, label %H_MPZ_LBL, label %body_failed]
+; CHECK: %p = call ptr @bar(ptr blockaddress(@f, %D_BREAK_LBL), ptr blockaddress(@f, %H_CONST_LBL), ptr blockaddress(@f, %H_MPZ_LBL), ptr blockaddress(@f, %body_failed))
+; CHECK: indirectbr ptr %p, [label %D_BREAK_LBL, label %H_CONST_LBL, label %H_MPZ_LBL, label %body_failed]
; Verify that identical edges are merged. rdar://problem/6453893
-define i8* @test1() {
+define ptr @test1() {
;
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i8* [ [[SCEVGEP:%.*]], [[LOOP]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, i8* [[LSR_IV]], i64 1
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[LOOP]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 1
; CHECK-NEXT: br i1 false, label [[LOOP]], label [[LOOPEXIT:%.*]]
; CHECK: loopexit:
; CHECK-NEXT: br i1 false, label [[BBA:%.*]], label [[BBB:%.*]]
; CHECK: bbB.bb89_crit_edge:
; CHECK-NEXT: br label [[BB89]]
; CHECK: bb89:
-; CHECK-NEXT: [[TMP75PHI:%.*]] = phi i8* [ [[SCEVGEP]], [[BBA_BB89_CRIT_EDGE]] ], [ [[SCEVGEP]], [[BBB_BB89_CRIT_EDGE]] ]
+; CHECK-NEXT: [[TMP75PHI:%.*]] = phi ptr [ [[SCEVGEP]], [[BBA_BB89_CRIT_EDGE]] ], [ [[SCEVGEP]], [[BBB_BB89_CRIT_EDGE]] ]
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: ret i8* [[TMP75PHI]]
+; CHECK-NEXT: ret ptr [[TMP75PHI]]
;
entry:
br label %loop
loop:
%rec = phi i32 [ %next, %loop ], [ 0, %entry ]
%next = add i32 %rec, 1
- %tmp75 = getelementptr i8, i8* null, i32 %next
+ %tmp75 = getelementptr i8, ptr null, i32 %next
br i1 false, label %loop, label %loopexit
loopexit:
]
bb89:
- %tmp75phi = phi i8* [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbB ], [ %tmp75, %bbB ], [ %tmp75, %bbB ]
+ %tmp75phi = phi ptr [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbB ], [ %tmp75, %bbB ], [ %tmp75, %bbB ]
br label %exit
exit:
- ret i8* %tmp75phi
+ ret ptr %tmp75phi
}
; Handle single-predecessor phis: PR13756
-define i8* @test2() {
+define ptr @test2() {
;
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i8* [ [[SCEVGEP:%.*]], [[LOOP]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, i8* [[LSR_IV]], i64 1
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[LOOP]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 1
; CHECK-NEXT: br i1 false, label [[LOOP]], label [[LOOPEXIT:%.*]]
; CHECK: loopexit:
; CHECK-NEXT: br i1 false, label [[BBA:%.*]], label [[BBB:%.*]]
; CHECK: bbB.exit_crit_edge:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: bb89:
-; CHECK-NEXT: [[TMP75PHI:%.*]] = phi i8* [ [[SCEVGEP]], [[BBA]] ], [ [[SCEVGEP]], [[BBA]] ], [ [[SCEVGEP]], [[BBA]] ]
+; CHECK-NEXT: [[TMP75PHI:%.*]] = phi ptr [ [[SCEVGEP]], [[BBA]] ], [ [[SCEVGEP]], [[BBA]] ], [ [[SCEVGEP]], [[BBA]] ]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[RESULT:%.*]] = phi i8* [ [[TMP75PHI]], [[BB89]] ], [ [[SCEVGEP]], [[BBB_EXIT_CRIT_EDGE]] ]
-; CHECK-NEXT: ret i8* [[RESULT]]
+; CHECK-NEXT: [[RESULT:%.*]] = phi ptr [ [[TMP75PHI]], [[BB89]] ], [ [[SCEVGEP]], [[BBB_EXIT_CRIT_EDGE]] ]
+; CHECK-NEXT: ret ptr [[RESULT]]
;
entry:
br label %loop
loop:
%rec = phi i32 [ %next, %loop ], [ 0, %entry ]
%next = add i32 %rec, 1
- %tmp75 = getelementptr i8, i8* null, i32 %next
+ %tmp75 = getelementptr i8, ptr null, i32 %next
br i1 false, label %loop, label %loopexit
loopexit:
]
bb89:
- %tmp75phi = phi i8* [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbA ]
+ %tmp75phi = phi ptr [ %tmp75, %bbA ], [ %tmp75, %bbA ], [ %tmp75, %bbA ]
br label %exit
exit:
- %result = phi i8* [ %tmp75phi, %bb89 ], [ %tmp75, %bbB ], [ %tmp75, %bbB ], [ %tmp75, %bbB ]
- ret i8* %result
+ %result = phi ptr [ %tmp75phi, %bb89 ], [ %tmp75, %bbB ], [ %tmp75, %bbB ], [ %tmp75, %bbB ]
+ ret ptr %result
}
; CHECK: icmp
; CHECK: icmp
; CHECK-NOT: icmp
-define void @test(i8* %base, i32 %a0) nounwind {
+define void @test(ptr %base, i32 %a0) nounwind {
entry:
br label %bb1
bb1:
%t15 = icmp ugt i32 %n15, -4
%m15 = select i1 %t15, i32 %n15, i32 -4
%a16 = add i32 %m15, %a15
- %gep = getelementptr i8, i8* %base, i32 %a16
+ %gep = getelementptr i8, ptr %base, i32 %a16
%ofs = add i32 %a16, 4
- %limit = getelementptr i8, i8* %base, i32 %ofs
+ %limit = getelementptr i8, ptr %base, i32 %ofs
br label %loop
loop:
- %iv = phi i8* [ %gep, %bb1 ], [ %inc, %loop ]
- %inc = getelementptr inbounds i8, i8* %iv, i64 1
- %exitcond = icmp eq i8* %inc, %limit
+ %iv = phi ptr [ %gep, %bb1 ], [ %inc, %loop ]
+ %inc = getelementptr inbounds i8, ptr %iv, i64 1
+ %exitcond = icmp eq ptr %inc, %limit
br i1 %exitcond, label %loop, label %exit
exit:
br i1 undef, label %for.body83, label %for.end.critedge
for.body83: ; preds = %for.body83, %for.end
- %ptr.0157 = phi i8* [ %add.ptr96, %for.body83 ], [ null, %for.end ]
- store i8 undef, i8* %ptr.0157, align 1
- %add.ptr96 = getelementptr inbounds i8, i8* %ptr.0157, i32 %cond
+ %ptr.0157 = phi ptr [ %add.ptr96, %for.body83 ], [ null, %for.end ]
+ store i8 undef, ptr %ptr.0157, align 1
+ %add.ptr96 = getelementptr inbounds i8, ptr %ptr.0157, i32 %cond
br label %for.body83
}
; Currently, LSR won't kick in on such loops.
; CHECK-LABEL: @nopreheader(
; CHECK: bb7.us:
-; CHECK-NOT: phi float*
+; CHECK-NOT: phi ptr
; CHECK: %j.01.us = phi i32
-; CHECK-NOT: phi float*
-define void @nopreheader(float* nocapture %a, i32 %n) nounwind {
+; CHECK-NOT: phi ptr
+define void @nopreheader(ptr nocapture %a, i32 %n) nounwind {
entry:
%0 = sdiv i32 %n, undef
- indirectbr i8* undef, [label %bb10.preheader]
+ indirectbr ptr undef, [label %bb10.preheader]
bb10.preheader: ; preds = %bb4
- indirectbr i8* undef, [label %bb8.preheader.lr.ph, label %return]
+ indirectbr ptr undef, [label %bb8.preheader.lr.ph, label %return]
bb8.preheader.lr.ph: ; preds = %bb10.preheader
- indirectbr i8* null, [label %bb7.lr.ph.us, label %bb9]
+ indirectbr ptr null, [label %bb7.lr.ph.us, label %bb9]
bb7.lr.ph.us: ; preds = %bb9.us, %bb8.preheader.lr.ph
%i.12.us = phi i32 [ %2, %bb9.us ], [ 0, %bb8.preheader.lr.ph ]
%tmp30 = mul i32 %0, %i.12.us
- indirectbr i8* undef, [label %bb7.us]
+ indirectbr ptr undef, [label %bb7.us]
bb7.us: ; preds = %bb7.lr.ph.us, %bb7.us
%j.01.us = phi i32 [ 0, %bb7.lr.ph.us ], [ %1, %bb7.us ]
%tmp31 = add i32 %tmp30, %j.01.us
- %scevgep9 = getelementptr float, float* %a, i32 %tmp31
- store float undef, float* %scevgep9, align 1
+ %scevgep9 = getelementptr float, ptr %a, i32 %tmp31
+ store float undef, ptr %scevgep9, align 1
%1 = add nsw i32 %j.01.us, 1
- indirectbr i8* undef, [label %bb9.us, label %bb7.us]
+ indirectbr ptr undef, [label %bb9.us, label %bb7.us]
bb9.us: ; preds = %bb7.us
%2 = add nsw i32 %i.12.us, 1
- indirectbr i8* undef, [label %bb7.lr.ph.us, label %return]
+ indirectbr ptr undef, [label %bb7.lr.ph.us, label %return]
bb9: ; preds = %bb9, %bb8.preheader.lr.ph
- indirectbr i8* undef, [label %bb9, label %return]
+ indirectbr ptr undef, [label %bb9, label %return]
return: ; preds = %bb9, %bb9.us, %bb10.preheader
ret void
; CHECK-LABEL: @nopreheader2(
; CHECK: bb7:
; CHECK: %indvar = phi i32
-define fastcc void @nopreheader2([200 x i32]* nocapture %Array2) nounwind {
+define fastcc void @nopreheader2(ptr nocapture %Array2) nounwind {
entry:
- indirectbr i8* undef, [label %bb]
+ indirectbr ptr undef, [label %bb]
bb: ; preds = %bb, %entry
- indirectbr i8* undef, [label %bb3, label %bb]
+ indirectbr ptr undef, [label %bb3, label %bb]
bb3: ; preds = %bb3, %bb
- indirectbr i8* undef, [label %bb8.preheader, label %bb3]
+ indirectbr ptr undef, [label %bb8.preheader, label %bb3]
bb8.preheader: ; preds = %bb9, %bb3
%indvar5 = phi i32 [ %indvar.next6, %bb9 ], [ 0, %bb3 ]
%tmp26 = add i32 %indvar5, 13
- indirectbr i8* null, [label %bb7]
+ indirectbr ptr null, [label %bb7]
bb7: ; preds = %bb8.preheader, %bb7
%indvar = phi i32 [ 0, %bb8.preheader ], [ %indvar.next, %bb7 ]
- %scevgep = getelementptr [200 x i32], [200 x i32]* %Array2, i32 %tmp26, i32 %indvar
- store i32 undef, i32* %scevgep, align 4
+ %scevgep = getelementptr [200 x i32], ptr %Array2, i32 %tmp26, i32 %indvar
+ store i32 undef, ptr %scevgep, align 4
%indvar.next = add i32 %indvar, 1
- indirectbr i8* undef, [label %bb9, label %bb7]
+ indirectbr ptr undef, [label %bb9, label %bb7]
bb9: ; preds = %bb7
%indvar.next6 = add i32 %indvar5, 1
- indirectbr i8* undef, [label %return, label %bb8.preheader]
+ indirectbr ptr undef, [label %return, label %bb8.preheader]
return: ; preds = %bb9
ret void
; CHECK: phi i64
; CHECK-NOT: phi
; CHECK: indirectbr
-define void @nopreheader(i8* %end) nounwind {
+define void @nopreheader(ptr %end) nounwind {
entry:
br label %while.cond179
while.cond179: ; preds = %if.end434, %if.end369, %if.end277, %if.end165
- %s.1 = phi i8* [ undef, %if.end434 ], [ %incdec.ptr356, %if.end348 ], [ undef, %entry ]
- indirectbr i8* undef, [label %land.rhs184, label %while.end453]
+ %s.1 = phi ptr [ undef, %if.end434 ], [ %incdec.ptr356, %if.end348 ], [ undef, %entry ]
+ indirectbr ptr undef, [label %land.rhs184, label %while.end453]
land.rhs184: ; preds = %while.cond179
- indirectbr i8* undef, [label %while.end453, label %while.cond197]
+ indirectbr ptr undef, [label %while.end453, label %while.cond197]
while.cond197: ; preds = %land.rhs202, %land.rhs184
%0 = phi i64 [ %indvar.next11, %land.rhs202 ], [ 0, %land.rhs184 ]
- indirectbr i8* undef, [label %land.rhs202, label %while.end215]
+ indirectbr ptr undef, [label %land.rhs202, label %while.end215]
land.rhs202: ; preds = %while.cond197
%indvar.next11 = add i64 %0, 1
- indirectbr i8* undef, [label %while.end215, label %while.cond197]
+ indirectbr ptr undef, [label %while.end215, label %while.cond197]
while.end215: ; preds = %land.rhs202, %while.cond197
- indirectbr i8* undef, [label %PREMATURE, label %if.end221]
+ indirectbr ptr undef, [label %PREMATURE, label %if.end221]
if.end221: ; preds = %while.end215
- indirectbr i8* undef, [label %while.cond238.preheader, label %lor.lhs.false227]
+ indirectbr ptr undef, [label %while.cond238.preheader, label %lor.lhs.false227]
lor.lhs.false227: ; preds = %if.end221
- indirectbr i8* undef, [label %while.cond238.preheader, label %if.else]
+ indirectbr ptr undef, [label %while.cond238.preheader, label %if.else]
while.cond238.preheader: ; preds = %lor.lhs.false227, %if.end221
%tmp16 = add i64 %0, 2
- indirectbr i8* undef, [label %while.cond238]
+ indirectbr ptr undef, [label %while.cond238]
while.cond238: ; preds = %land.rhs243, %while.cond238.preheader
%1 = phi i64 [ %indvar.next15, %land.rhs243 ], [ 0, %while.cond238.preheader ]
%tmp36 = add i64 %tmp16, %1
- %s.3 = getelementptr i8, i8* %s.1, i64 %tmp36
- %cmp241 = icmp ult i8* %s.3, %end
- indirectbr i8* undef, [label %land.rhs243, label %while.end256]
+ %s.3 = getelementptr i8, ptr %s.1, i64 %tmp36
+ %cmp241 = icmp ult ptr %s.3, %end
+ indirectbr ptr undef, [label %land.rhs243, label %while.end256]
land.rhs243: ; preds = %while.cond238
%indvar.next15 = add i64 %1, 1
- indirectbr i8* undef, [label %while.end256, label %while.cond238]
+ indirectbr ptr undef, [label %while.end256, label %while.cond238]
while.end256: ; preds = %land.rhs243, %while.cond238
- indirectbr i8* undef, [label %PREMATURE]
+ indirectbr ptr undef, [label %PREMATURE]
if.else: ; preds = %lor.lhs.false227
- indirectbr i8* undef, [label %if.then297, label %if.else386]
+ indirectbr ptr undef, [label %if.then297, label %if.else386]
if.then297: ; preds = %if.else
- indirectbr i8* undef, [label %PREMATURE, label %if.end307]
+ indirectbr ptr undef, [label %PREMATURE, label %if.end307]
if.end307: ; preds = %if.then297
- indirectbr i8* undef, [label %if.end314, label %FAIL]
+ indirectbr ptr undef, [label %if.end314, label %FAIL]
if.end314: ; preds = %if.end307
- indirectbr i8* undef, [label %if.end340]
+ indirectbr ptr undef, [label %if.end340]
if.end340: ; preds = %while.end334
- indirectbr i8* undef, [label %PREMATURE, label %if.end348]
+ indirectbr ptr undef, [label %PREMATURE, label %if.end348]
if.end348: ; preds = %if.end340
- %incdec.ptr356 = getelementptr inbounds i8, i8* undef, i64 2
- indirectbr i8* undef, [label %while.cond179]
+ %incdec.ptr356 = getelementptr inbounds i8, ptr undef, i64 2
+ indirectbr ptr undef, [label %while.cond179]
if.else386: ; preds = %if.else
- indirectbr i8* undef, [label %while.end453, label %if.end434]
+ indirectbr ptr undef, [label %while.end453, label %if.end434]
if.end434: ; preds = %if.then428, %if.end421
- indirectbr i8* undef, [label %while.cond179]
+ indirectbr ptr undef, [label %while.cond179]
while.end453: ; preds = %if.else386, %land.rhs184, %while.cond179
- indirectbr i8* undef, [label %PREMATURE, label %if.end459]
+ indirectbr ptr undef, [label %PREMATURE, label %if.end459]
if.end459: ; preds = %while.end453
- indirectbr i8* undef, [label %if.then465, label %FAIL]
+ indirectbr ptr undef, [label %if.then465, label %FAIL]
if.then465: ; preds = %if.end459
- indirectbr i8* undef, [label %return, label %if.then479]
+ indirectbr ptr undef, [label %return, label %if.then479]
if.then479: ; preds = %if.then465
- indirectbr i8* undef, [label %return]
+ indirectbr ptr undef, [label %return]
FAIL: ; preds = %if.end459, %if.end307, %land.lhs.true142, %land.lhs.true131, %while.end
- indirectbr i8* undef, [label %DECL_FAIL]
+ indirectbr ptr undef, [label %DECL_FAIL]
PREMATURE: ; preds = %while.end453, %while.end415, %if.end340, %while.end334, %if.then297, %while.end256, %while.end215
- indirectbr i8* undef, [label %return, label %if.then495]
+ indirectbr ptr undef, [label %return, label %if.then495]
if.then495: ; preds = %PREMATURE
- indirectbr i8* undef, [label %return]
+ indirectbr ptr undef, [label %return]
DECL_FAIL: ; preds = %if.then488, %FAIL, %land.lhs.true99, %lor.lhs.false, %if.end83, %if.then39, %if.end
- indirectbr i8* undef, [label %return]
+ indirectbr ptr undef, [label %return]
return: ; preds = %if.then512, %if.end504, %DECL_FAIL, %if.then495, %PREMATURE, %if.then479, %if.then465, %if.then69, %if.end52, %if.end19, %if.then
ret void
; CHECK-LABEL: @nopreheader(
; CHECK: for.cond:
; CHECK: %tmp128 = add i64 %0, %indvar65
-define void @nopreheader(i8* %cmd) nounwind ssp {
+define void @nopreheader(ptr %cmd) nounwind ssp {
entry:
- indirectbr i8* undef, [label %while.cond]
+ indirectbr ptr undef, [label %while.cond]
while.cond: ; preds = %while.body, %entry
%0 = phi i64 [ %indvar.next48, %while.body ], [ 0, %entry ]
- indirectbr i8* undef, [label %while.end, label %while.body]
+ indirectbr ptr undef, [label %while.end, label %while.body]
while.body: ; preds = %lor.rhs, %lor.lhs.false17, %lor.lhs.false11, %lor.lhs.false, %land.rhs
%indvar.next48 = add i64 %0, 1
- indirectbr i8* undef, [label %while.cond]
+ indirectbr ptr undef, [label %while.cond]
while.end: ; preds = %lor.rhs, %while.cond
- indirectbr i8* undef, [label %if.end152]
+ indirectbr ptr undef, [label %if.end152]
if.end152: ; preds = %lor.lhs.false144, %if.end110
- indirectbr i8* undef, [label %lor.lhs.false184, label %for.cond]
+ indirectbr ptr undef, [label %lor.lhs.false184, label %for.cond]
lor.lhs.false184: ; preds = %lor.lhs.false177
- indirectbr i8* undef, [label %return, label %for.cond]
+ indirectbr ptr undef, [label %return, label %for.cond]
for.cond: ; preds = %for.inc, %lor.lhs.false184, %if.end152
%indvar65 = phi i64 [ %indvar.next66, %for.inc ], [ 0, %lor.lhs.false184 ], [ 0, %if.end152 ]
%tmp128 = add i64 %0, %indvar65
- %s.4 = getelementptr i8, i8* %cmd, i64 %tmp128
- %tmp195 = load i8, i8* %s.4, align 1
- indirectbr i8* undef, [label %return, label %land.rhs198]
+ %s.4 = getelementptr i8, ptr %cmd, i64 %tmp128
+ %tmp195 = load i8, ptr %s.4, align 1
+ indirectbr ptr undef, [label %return, label %land.rhs198]
land.rhs198: ; preds = %for.cond
- indirectbr i8* undef, [label %return, label %for.inc]
+ indirectbr ptr undef, [label %return, label %for.inc]
for.inc: ; preds = %lor.rhs234, %land.lhs.true228, %land.lhs.true216, %land.lhs.true204
%indvar.next66 = add i64 %indvar65, 1
- indirectbr i8* undef, [label %for.cond]
+ indirectbr ptr undef, [label %for.cond]
return: ; preds = %if.end677, %doshell, %if.then96
ret void
; User. Just make sure it doesn't assert.
define void @nopreheader2() nounwind ssp {
entry:
- indirectbr i8* undef, [label %while.cond, label %return]
+ indirectbr ptr undef, [label %while.cond, label %return]
while.cond: ; preds = %while.cond.backedge, %entry
- indirectbr i8* undef, [label %while.cond.backedge, label %lor.rhs]
+ indirectbr ptr undef, [label %while.cond.backedge, label %lor.rhs]
lor.rhs: ; preds = %while.cond
- indirectbr i8* undef, [label %while.cond.backedge, label %while.end]
+ indirectbr ptr undef, [label %while.cond.backedge, label %while.end]
while.cond.backedge: ; preds = %lor.rhs, %while.cond
- indirectbr i8* undef, [label %while.cond]
+ indirectbr ptr undef, [label %while.cond]
while.end: ; preds = %lor.rhs
- indirectbr i8* undef, [label %if.then18, label %return]
+ indirectbr ptr undef, [label %if.then18, label %return]
if.then18: ; preds = %while.end
- indirectbr i8* undef, [label %if.end35, label %lor.lhs.false]
+ indirectbr ptr undef, [label %if.end35, label %lor.lhs.false]
lor.lhs.false: ; preds = %if.then18
- indirectbr i8* undef, [label %if.end35, label %return]
+ indirectbr ptr undef, [label %if.end35, label %return]
if.end35: ; preds = %lor.lhs.false, %if.then18
- indirectbr i8* undef, [label %while.cond36]
+ indirectbr ptr undef, [label %while.cond36]
while.cond36: ; preds = %while.body49, %if.end35
%0 = phi i64 [ %indvar.next13, %while.body49 ], [ 0, %if.end35 ]
- indirectbr i8* undef, [label %while.body49, label %lor.rhs42]
+ indirectbr ptr undef, [label %while.body49, label %lor.rhs42]
lor.rhs42: ; preds = %while.cond36
- indirectbr i8* undef, [label %while.body49, label %while.end52]
+ indirectbr ptr undef, [label %while.body49, label %while.end52]
while.body49: ; preds = %lor.rhs42, %while.cond36
%indvar.next13 = add i64 %0, 1
- indirectbr i8* undef, [label %while.cond36]
+ indirectbr ptr undef, [label %while.cond36]
while.end52: ; preds = %lor.rhs42
- indirectbr i8* undef, [label %land.lhs.true, label %return]
+ indirectbr ptr undef, [label %land.lhs.true, label %return]
land.lhs.true: ; preds = %while.end52
- indirectbr i8* undef, [label %while.cond66.preheader, label %return]
+ indirectbr ptr undef, [label %while.cond66.preheader, label %return]
while.cond66.preheader: ; preds = %land.lhs.true
- indirectbr i8* undef, [label %while.cond66]
+ indirectbr ptr undef, [label %while.cond66]
while.cond66: ; preds = %while.body77, %while.cond66.preheader
- indirectbr i8* undef, [label %land.rhs, label %while.cond81.preheader]
+ indirectbr ptr undef, [label %land.rhs, label %while.cond81.preheader]
land.rhs: ; preds = %while.cond66
- indirectbr i8* undef, [label %while.body77, label %while.cond81.preheader]
+ indirectbr ptr undef, [label %while.body77, label %while.cond81.preheader]
while.cond81.preheader: ; preds = %land.rhs, %while.cond66
%tmp45 = add i64 undef, %0
%tmp46 = add i64 %tmp45, undef
- indirectbr i8* undef, [label %while.cond81]
+ indirectbr ptr undef, [label %while.cond81]
while.body77: ; preds = %land.rhs
- indirectbr i8* undef, [label %while.cond66]
+ indirectbr ptr undef, [label %while.cond66]
while.cond81: ; preds = %while.body94, %while.cond81.preheader
%tmp25 = add i64 %tmp46, undef
- indirectbr i8* undef, [label %while.body94, label %lor.rhs87]
+ indirectbr ptr undef, [label %while.body94, label %lor.rhs87]
lor.rhs87: ; preds = %while.cond81
- indirectbr i8* undef, [label %while.body94, label %return]
+ indirectbr ptr undef, [label %while.body94, label %return]
while.body94: ; preds = %lor.rhs87, %while.cond81
- indirectbr i8* undef, [label %while.cond81]
+ indirectbr ptr undef, [label %while.cond81]
return: ; preds = %if.end216, %land.lhs.true183, %land.lhs.true, %while.end52, %lor.lhs.false, %while.end, %entry
ret void
; Test a phi operand IV User dominated by a no-preheader loop.
define void @nopreheader3() nounwind uwtable ssp align 2 {
entry:
- indirectbr i8* blockaddress(@nopreheader3, %if.end10), [label %if.end22, label %if.end10]
+ indirectbr ptr blockaddress(@nopreheader3, %if.end10), [label %if.end22, label %if.end10]
if.end10: ; preds = %entry
- indirectbr i8* blockaddress(@nopreheader3, %if.end6.i), [label %if.end22, label %if.end6.i]
+ indirectbr ptr blockaddress(@nopreheader3, %if.end6.i), [label %if.end22, label %if.end6.i]
if.end6.i: ; preds = %if.end10
- indirectbr i8* blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
+ indirectbr ptr blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
while.cond2.preheader.i.i: ; preds = %while.end.i18.i, %if.end6.i
- indirectbr i8* blockaddress(@nopreheader3, %while.cond2.i.i), [label %while.cond2.i.i]
+ indirectbr ptr blockaddress(@nopreheader3, %while.cond2.i.i), [label %while.cond2.i.i]
while.cond2.i.i: ; preds = %while.cond2.i.i, %while.cond2.preheader.i.i
%i1.1.i14.i = phi i32 [ %add.i15.i, %while.cond2.i.i ], [ undef, %while.cond2.preheader.i.i ]
%add.i15.i = add nsw i32 %i1.1.i14.i, undef
- indirectbr i8* blockaddress(@nopreheader3, %while.end.i18.i), [label %while.cond2.i.i, label %while.end.i18.i]
+ indirectbr ptr blockaddress(@nopreheader3, %while.end.i18.i), [label %while.cond2.i.i, label %while.end.i18.i]
while.end.i18.i: ; preds = %while.cond2.i.i
- indirectbr i8* blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
+ indirectbr ptr blockaddress(@nopreheader3, %while.cond2.preheader.i.i), [label %if.then12, label %while.cond2.preheader.i.i]
if.then12: ; preds = %while.end.i18.i, %if.end6.i
%i1.0.lcssa.i.i = phi i32 [ undef, %if.end6.i ], [ %i1.1.i14.i, %while.end.i18.i ]
- indirectbr i8* blockaddress(@nopreheader3, %if.end22), [label %if.end22]
+ indirectbr ptr blockaddress(@nopreheader3, %if.end22), [label %if.end22]
if.end22: ; preds = %if.then12, %if.end10, %entry
ret void
; check it. As long as the analysis doesn't crash we're ok.
target datalayout = "e-p:64:64:64-n32:64"
-%struct.this_structure_s.0.5 = type { [6144 x [8 x i32]], [6144 x [8 x i32]], [6147 x [4 x i32]], [8 x i32], [2 x i8*], [2 x i8*], [6144 x i8], [6144 x i32], [6144 x i32], [4 x [4 x i8]] }
+%struct.this_structure_s.0.5 = type { [6144 x [8 x i32]], [6144 x [8 x i32]], [6147 x [4 x i32]], [8 x i32], [2 x ptr], [2 x ptr], [6144 x i8], [6144 x i32], [6144 x i32], [4 x [4 x i8]] }
-define internal fastcc void @someFunction(%struct.this_structure_s.0.5* nocapture %scratch, i32 %stage, i32 %cbSize) nounwind {
+define internal fastcc void @someFunction(ptr nocapture %scratch, i32 %stage, i32 %cbSize) nounwind {
entry:
- %0 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 4, i32 %stage
- %1 = load i8*, i8** %0, align 4
- %2 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 5, i32 %stage
- %3 = load i8*, i8** %2, align 4
- %4 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 2, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.this_structure_s.0.5, ptr %scratch, i32 0, i32 4, i32 %stage
+ %1 = load ptr, ptr %0, align 4
+ %2 = getelementptr inbounds %struct.this_structure_s.0.5, ptr %scratch, i32 0, i32 5, i32 %stage
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds %struct.this_structure_s.0.5, ptr %scratch, i32 0, i32 2, i32 0, i32 0
%tmp11 = shl i32 %stage, 1
%tmp1325 = or i32 %tmp11, 1
br label %__label_D_1608
__label_D_1608: ; preds = %__label_D_1608, %entry
%i.12 = phi i32 [ 0, %entry ], [ %10, %__label_D_1608 ]
%tmp = shl i32 %i.12, 2
- %lvar_g.13 = getelementptr i32, i32* %4, i32 %tmp
+ %lvar_g.13 = getelementptr i32, ptr %4, i32 %tmp
%tmp626 = or i32 %tmp, 1
- %scevgep = getelementptr i32, i32* %4, i32 %tmp626
+ %scevgep = getelementptr i32, ptr %4, i32 %tmp626
%tmp727 = or i32 %tmp, 2
- %scevgep8 = getelementptr i32, i32* %4, i32 %tmp727
+ %scevgep8 = getelementptr i32, ptr %4, i32 %tmp727
%tmp928 = or i32 %tmp, 3
- %scevgep10 = getelementptr i32, i32* %4, i32 %tmp928
- %scevgep12 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
- %scevgep14 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
- %5 = load i8, i8* %scevgep12, align 1
+ %scevgep10 = getelementptr i32, ptr %4, i32 %tmp928
+ %scevgep12 = getelementptr %struct.this_structure_s.0.5, ptr %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
+ %scevgep14 = getelementptr %struct.this_structure_s.0.5, ptr %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
+ %5 = load i8, ptr %scevgep12, align 1
%6 = sext i8 %5 to i32
- %7 = load i8, i8* %scevgep14, align 1
+ %7 = load i8, ptr %scevgep14, align 1
%8 = sext i8 %7 to i32
- store i32 0, i32* %lvar_g.13, align 4
- store i32 %8, i32* %scevgep, align 4
- store i32 %6, i32* %scevgep8, align 4
+ store i32 0, ptr %lvar_g.13, align 4
+ store i32 %8, ptr %scevgep, align 4
+ store i32 %6, ptr %scevgep8, align 4
%9 = add nsw i32 %8, %6
- store i32 %9, i32* %scevgep10, align 4
+ store i32 %9, ptr %scevgep10, align 4
%10 = add nsw i32 %i.12, 1
%exitcond = icmp eq i32 %10, 3
br i1 %exitcond, label %return, label %__label_D_1608
define i32 @main() nounwind uwtable ssp {
entry:
%l_2 = alloca [1 x i32], align 4
- %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 0
- store i32 0, i32* %arrayidx, align 4
- %tmp = load i32, i32* @g_3, align 4
+ store i32 0, ptr %l_2, align 4
+ %tmp = load i32, ptr @g_3, align 4
%idxprom = sext i32 %tmp to i64
- %arrayidx1 = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 %idxprom
- %tmp1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds [1 x i32], ptr %l_2, i64 0, i64 %idxprom
+ %tmp1 = load i32, ptr %arrayidx1, align 4
%conv.i.i = and i32 %tmp1, 65535
%tobool.i.i.i = icmp ne i32 %tmp, 0
br label %codeRepl
for.inc.i.i.us: ; preds = %for.body.i.i.us
%add.i.i.us = add nsw i32 %tmp2, 1
- store i32 %add.i.i.us, i32* @g_752, align 4
+ store i32 %add.i.i.us, ptr @g_752, align 4
br label %for.cond.i.i.us
for.body.i.i.us: ; preds = %codeRepl5.us
for.inc.i.i: ; preds = %for.body.i.i
%add.i.i = add nsw i32 %tmp3, 1
- store i32 %add.i.i, i32* @g_752, align 4
+ store i32 %add.i.i, ptr @g_752, align 4
br label %for.cond.i.i
func_4.exit: ; No predecessors!
%struct.snork = type { %struct.fuga, i32, i32, i32, i32, i32, i32 }
%struct.fuga = type { %struct.gork, i64 }
-%struct.gork = type { i8*, i32, i32, %struct.noot* }
+%struct.gork = type { ptr, i32, i32, ptr }
%struct.noot = type opaque
%struct.jim = type { [5120 x i8], i32, i32, [2048 x i8], i32, [256 x i8] }
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- tail call void (...) @snork(i8* getelementptr inbounds ([52 x i8], [52 x i8]* @global1, i64 0, i64 0), i32 2021) nounwind
- tail call void (...) @snork(i8* getelementptr inbounds (%struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 1), i32 -2146631418) nounwind
+ tail call void (...) @snork(ptr @global1, i32 2021) nounwind
+ tail call void (...) @snork(ptr getelementptr inbounds (%struct.jim, ptr @global3, i64 0, i32 3, i64 1), i32 -2146631418) nounwind
unreachable
bb6: ; preds = %bb
- tail call void @zot(i8* getelementptr inbounds (%struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 0), i8* getelementptr inbounds (%struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 1), i64 undef, i32 1, i1 false) nounwind
- %tmp7 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 undef
- store i8 0, i8* %tmp7, align 1
+ tail call void @zot(ptr getelementptr inbounds (%struct.jim, ptr @global3, i64 0, i32 5, i64 0), ptr getelementptr inbounds (%struct.jim, ptr @global3, i64 0, i32 3, i64 1), i64 undef, i32 1, i1 false) nounwind
+ %tmp7 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 5, i64 undef
+ store i8 0, ptr %tmp7, align 1
%tmp8 = add nsw i32 0, 1
%tmp9 = sext i32 %tmp8 to i64
%tmp10 = add i64 %tmp9, 1
- %tmp11 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp10
+ %tmp11 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 3, i64 %tmp10
%tmp12 = sub i64 2047, %tmp9
%tmp13 = icmp eq i32 undef, 1
br i1 %tmp13, label %bb14, label %bb15
br i1 %tmp21, label %bb22, label %bb32
bb22: ; preds = %bb17
- %tmp23 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 0
- %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp23 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 3, i64 0
+ %tmp24 = load i8, ptr %tmp23, align 1
%tmp25 = icmp eq i8 %tmp24, 58
br i1 %tmp25, label %bb30, label %bb26
br i1 %tmp31, label %bb33, label %bb32
bb32: ; preds = %bb30, %bb26, %bb17
- tail call void (...) @snork(i8* getelementptr inbounds ([52 x i8], [52 x i8]* @global1, i64 0, i64 0), i32 2038) nounwind
- tail call void (...) @snork(i8* %tmp11, i32 -2146631418) nounwind
+ tail call void (...) @snork(ptr @global1, i32 2038) nounwind
+ tail call void (...) @snork(ptr %tmp11, i32 -2146631418) nounwind
unreachable
bb33: ; preds = %bb30
- tail call void @zot(i8* getelementptr inbounds (%struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 0), i8* %tmp11, i64 undef, i32 1, i1 false) nounwind
- %tmp34 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 undef
- store i8 0, i8* %tmp34, align 1
+ tail call void @zot(ptr getelementptr inbounds (%struct.jim, ptr @global3, i64 0, i32 5, i64 0), ptr %tmp11, i64 undef, i32 1, i1 false) nounwind
+ %tmp34 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 5, i64 undef
+ store i8 0, ptr %tmp34, align 1
%tmp35 = add nsw i32 %tmp19, 1
%tmp36 = sext i32 %tmp35 to i64
%tmp37 = add i64 %tmp36, %tmp10
- %tmp38 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp37
+ %tmp38 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 3, i64 %tmp37
%tmp39 = sub i64 %tmp12, %tmp36
br i1 false, label %bb40, label %bb41
bb48: ; preds = %bb43
%tmp49 = add i64 %tmp44, %tmp37
- %tmp50 = load i8, i8* undef, align 1
+ %tmp50 = load i8, ptr undef, align 1
%tmp51 = icmp eq i8 %tmp50, 58
br i1 %tmp51, label %bb55, label %bb52
br i1 %tmp57, label %bb59, label %bb58
bb58: ; preds = %bb55, %bb52, %bb43
- tail call void (...) @snork(i8* getelementptr inbounds ([52 x i8], [52 x i8]* @global1, i64 0, i64 0), i32 2055) nounwind
- tail call void (...) @snork(i8* %tmp38, i32 -2146631418) nounwind
+ tail call void (...) @snork(ptr @global1, i32 2055) nounwind
+ tail call void (...) @snork(ptr %tmp38, i32 -2146631418) nounwind
br label %bb247
bb59: ; preds = %bb55
%tmp60 = sext i32 %tmp45 to i64
- tail call void @zot(i8* getelementptr inbounds (%struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 0), i8* %tmp38, i64 %tmp60, i32 1, i1 false) nounwind
- %tmp61 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 %tmp60
- store i8 0, i8* %tmp61, align 1
+ tail call void @zot(ptr getelementptr inbounds (%struct.jim, ptr @global3, i64 0, i32 5, i64 0), ptr %tmp38, i64 %tmp60, i32 1, i1 false) nounwind
+ %tmp61 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 5, i64 %tmp60
+ store i8 0, ptr %tmp61, align 1
%tmp62 = add nsw i32 %tmp45, 1
%tmp63 = sext i32 %tmp62 to i64
%tmp64 = add i64 %tmp63, %tmp37
]
bb69: ; preds = %bb68
- tail call void (...) @snork(i8* getelementptr inbounds ([52 x i8], [52 x i8]* @global1, i64 0, i64 0), i32 2071) nounwind
- %tmp70 = load i32, i32* getelementptr inbounds (%struct.snork, %struct.snork* @global, i64 0, i32 2), align 4
+ tail call void (...) @snork(ptr @global1, i32 2071) nounwind
+ %tmp70 = load i32, ptr getelementptr inbounds (%struct.snork, ptr @global, i64 0, i32 2), align 4
unreachable
bb71: ; preds = %bb68
- %tmp72 = load i32, i32* getelementptr inbounds (%struct.snork, %struct.snork* @global, i64 0, i32 4), align 4
+ %tmp72 = load i32, ptr getelementptr inbounds (%struct.snork, ptr @global, i64 0, i32 4), align 4
%tmp73 = icmp eq i32 undef, 0
br i1 %tmp73, label %bb247, label %bb74
bb226: ; preds = %bb221
%tmp227 = add i64 %tmp222, %tmp216
- %tmp228 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp227
- %tmp229 = load i8, i8* %tmp228, align 1
+ %tmp228 = getelementptr inbounds %struct.jim, ptr @global3, i64 0, i32 3, i64 %tmp227
+ %tmp229 = load i8, ptr %tmp228, align 1
br i1 false, label %bb233, label %bb230
bb230: ; preds = %bb226
ret void
}
-declare void @zot(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @zot(ptr nocapture, ptr nocapture, i64, i32, i1) nounwind
%tmp = phi i8 [ %tmp14, %bb11 ], [ 25, %bb190 ]
%tmp9 = phi i8 [ %tmp12, %bb11 ], [ 25, %bb190 ]
%tmp10 = add i8 %tmp, -5
- indirectbr i8* undef, [label %bb11, label %bb15]
+ indirectbr ptr undef, [label %bb11, label %bb15]
bb11: ; preds = %bb8
%tmp12 = add i8 %tmp9, 1
%tmp13 = add i8 %tmp9, -19
%tmp14 = add i8 %tmp, 1
- indirectbr i8* undef, [label %bb8]
+ indirectbr ptr undef, [label %bb8]
bb15: ; preds = %bb8
- indirectbr i8* undef, [label %bb16]
+ indirectbr ptr undef, [label %bb16]
bb16: ; preds = %bb16, %bb15
- indirectbr i8* undef, [label %bb37, label %bb190]
+ indirectbr ptr undef, [label %bb37, label %bb190]
bb37: ; preds = %bb190
- indirectbr i8* undef, [label %bb38]
+ indirectbr ptr undef, [label %bb38]
bb38: ; preds = %bb37, %bb5
ret void
bb190: ; preds = %bb189, %bb187
- indirectbr i8* undef, [label %bb37, label %bb8]
+ indirectbr ptr undef, [label %bb37, label %bb8]
}
; CHECK: bb:
; "dead" ptrtoint not emitted (or dead code eliminated) with
; current LSR cost model.
-; CHECK-NOT: = ptrtoint i8* undef to i64
+; CHECK-NOT: = ptrtoint ptr undef to i64
; CHECK: .lr.ph
; CHECK: ret void
define void @VerifyDiagnosticConsumerTest() unnamed_addr nounwind uwtable align 2 {
bb:
- %tmp3 = call i8* @getCharData() nounwind
- %tmp4 = call i8* @getCharData() nounwind
- %tmp5 = ptrtoint i8* %tmp4 to i64
- %tmp6 = ptrtoint i8* %tmp3 to i64
+ %tmp3 = call ptr @getCharData() nounwind
+ %tmp4 = call ptr @getCharData() nounwind
+ %tmp5 = ptrtoint ptr %tmp4 to i64
+ %tmp6 = ptrtoint ptr %tmp3 to i64
%tmp7 = sub i64 %tmp5, %tmp6
br i1 undef, label %bb87, label %.preheader
br i1 %tmp62, label %_ZNK4llvm9StringRef4findEcm.exit._crit_edge, label %bb63
bb63: ; preds = %bb61
- %tmp64 = getelementptr inbounds i8, i8* %tmp3, i64 %i.0.i
- %tmp65 = load i8, i8* %tmp64, align 1
+ %tmp64 = getelementptr inbounds i8, ptr %tmp3, i64 %i.0.i
+ %tmp65 = load i8, ptr %tmp64, align 1
%tmp67 = add i64 %i.0.i, 1
br i1 undef, label %_ZNK4llvm9StringRef4findEcm.exit.loopexit, label %bb61
ret void
}
-declare i8* @getCharData()
+declare ptr @getCharData()
; CHECK: [[IVREG]], #8
; CHECK-NEXT: cmp [[IVREG]], #7
; CHECK-NEXT: b.hi
-define i8* @memset(i8* %dest, i32 %val, i64 %len) nounwind ssp noimplicitfloat {
+define ptr @memset(ptr %dest, i32 %val, i64 %len) nounwind ssp noimplicitfloat {
entry:
%cmp = icmp eq i64 %len, 0
br i1 %cmp, label %done, label %while.cond.preheader
br label %while.cond
while.cond: ; preds = %while.body, %while.cond.preheader
- %ptr.0 = phi i8* [ %incdec.ptr, %while.body ], [ %dest, %while.cond.preheader ]
+ %ptr.0 = phi ptr [ %incdec.ptr, %while.body ], [ %dest, %while.cond.preheader ]
%len.addr.0 = phi i64 [ %dec, %while.body ], [ %len, %while.cond.preheader ]
%cond = icmp eq i64 %len.addr.0, 0
br i1 %cond, label %done, label %land.rhs
land.rhs: ; preds = %while.cond
- %0 = ptrtoint i8* %ptr.0 to i64
+ %0 = ptrtoint ptr %ptr.0 to i64
%and = and i64 %0, 7
%cmp5 = icmp eq i64 %and, 0
br i1 %cmp5, label %if.end9, label %while.body
while.body: ; preds = %land.rhs
- %incdec.ptr = getelementptr inbounds i8, i8* %ptr.0, i64 1
- store i8 %conv, i8* %ptr.0, align 1, !tbaa !0
+ %incdec.ptr = getelementptr inbounds i8, ptr %ptr.0, i64 1
+ store i8 %conv, ptr %ptr.0, align 1, !tbaa !0
%dec = add i64 %len.addr.0, -1
br label %while.cond
%mask2.masked = or i64 %mask5, %6
%mask = or i64 %mask2.masked, %7
%ins = or i64 %mask, %8
- %9 = bitcast i8* %ptr.0 to i64*
%cmp1636 = icmp ugt i64 %len.addr.0, 7
br i1 %cmp1636, label %while.body18, label %while.body29.lr.ph
while.body18: ; preds = %if.end9, %while.body18
- %wideptr.038 = phi i64* [ %incdec.ptr19, %while.body18 ], [ %9, %if.end9 ]
+ %wideptr.038 = phi ptr [ %incdec.ptr19, %while.body18 ], [ %ptr.0, %if.end9 ]
%len.addr.137 = phi i64 [ %sub, %while.body18 ], [ %len.addr.0, %if.end9 ]
- %incdec.ptr19 = getelementptr inbounds i64, i64* %wideptr.038, i64 1
- store i64 %ins, i64* %wideptr.038, align 8, !tbaa !2
+ %incdec.ptr19 = getelementptr inbounds i64, ptr %wideptr.038, i64 1
+ store i64 %ins, ptr %wideptr.038, align 8, !tbaa !2
%sub = add i64 %len.addr.137, -8
%cmp16 = icmp ugt i64 %sub, 7
br i1 %cmp16, label %while.body18, label %while.end20
while.body29.lr.ph: ; preds = %while.end20, %if.end9
%len.addr.1.lcssa49 = phi i64 [ %sub, %while.end20 ], [ %len.addr.0, %if.end9 ]
- %wideptr.0.lcssa48 = phi i64* [ %incdec.ptr19, %while.end20 ], [ %9, %if.end9 ]
- %10 = bitcast i64* %wideptr.0.lcssa48 to i8*
+ %wideptr.0.lcssa48 = phi ptr [ %incdec.ptr19, %while.end20 ], [ %ptr.0, %if.end9 ]
br label %while.body29
while.body29: ; preds = %while.body29, %while.body29.lr.ph
%len.addr.235 = phi i64 [ %len.addr.1.lcssa49, %while.body29.lr.ph ], [ %dec26, %while.body29 ]
- %ptr.134 = phi i8* [ %10, %while.body29.lr.ph ], [ %incdec.ptr31, %while.body29 ]
+ %ptr.134 = phi ptr [ %wideptr.0.lcssa48, %while.body29.lr.ph ], [ %incdec.ptr31, %while.body29 ]
%dec26 = add i64 %len.addr.235, -1
- %incdec.ptr31 = getelementptr inbounds i8, i8* %ptr.134, i64 1
- store i8 %conv, i8* %ptr.134, align 1, !tbaa !0
+ %incdec.ptr31 = getelementptr inbounds i8, ptr %ptr.134, i64 1
+ store i8 %conv, ptr %ptr.134, align 1, !tbaa !0
%cmp27 = icmp eq i64 %dec26, 0
br i1 %cmp27, label %done, label %while.body29
done: ; preds = %while.cond, %while.body29, %while.end20, %entry
- ret i8* %dest
+ ret ptr %dest
}
!0 = !{!"omnipotent char", !1}
%"Type" = type <{[166 x [338 x i8]]}>
-define void @test_lsr_pre_inc_offset_check(%"Type"* %p) {
+define void @test_lsr_pre_inc_offset_check(ptr %p) {
; CHECK-LABEL: test_lsr_pre_inc_offset_check:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #165
exit:
ret void
if.then:
- %arrayidx.i = getelementptr inbounds %"Type", %"Type"* %p, i64 0, i32 0, i64 %indvars, i64 1
- %0 = bitcast i8* %arrayidx.i to i32*
- store i32 0, i32* %0, align 1
+ %arrayidx.i = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 1
+ store i32 0, ptr %arrayidx.i, align 1
br label %if.end
if.end:
- %arrayidx.p = getelementptr inbounds %"Type", %"Type"* %p, i64 0, i32 0, i64 %indvars, i64 2
- store i8 2, i8* %arrayidx.p, align 1
+ %arrayidx.p = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 2
+ store i8 2, ptr %arrayidx.p, align 1
%indvars.iv.next = add nuw nsw i64 %indvars, 1
%add.i = add nuw i8 %begin, 1
%cmp.i.not = icmp eq i64 %indvars.iv.next, 166
; Verify that redundant adds aren't inserted by LSR.
; CHECK-LABEL: @bar(
-define void @bar(double* %A) {
+define void @bar(ptr %A) {
entry:
br label %while.cond
land.rhs:
%indvars.iv.next29 = add nsw i64 %indvars.iv28, -1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %indvars.iv.next29
- %Aload = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %indvars.iv.next29
+ %Aload = load double, ptr %arrayidx, align 8
%cmp1 = fcmp oeq double %Aload, 0.000000e+00
br i1 %cmp1, label %while.cond, label %if.end
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"
-@d = internal unnamed_addr global i32** null, align 8
+@d = internal unnamed_addr global ptr null, align 8
define dso_local i32 @main() local_unnamed_addr {
entry:
- %.pre.pre = load i32**, i32*** @d, align 8
+ %.pre.pre = load ptr, ptr @d, align 8
br label %for.body9
for.body9: ; preds = %for.body9, %entry
- %i = phi i32** [ %.pre.pre, %entry ], [ %incdec.ptr, %for.body9 ]
- %incdec.ptr = getelementptr inbounds i32*, i32** %i, i64 -1
+ %i = phi ptr [ %.pre.pre, %entry ], [ %incdec.ptr, %for.body9 ]
+ %incdec.ptr = getelementptr inbounds ptr, ptr %i, i64 -1
br i1 undef, label %for.body9, label %for.inc
for.inc: ; preds = %for.body9
br label %for.body9.118
for.body9.1: ; preds = %for.inc.547, %for.body9.1
- %i1 = phi i32** [ %incdec.ptr.1, %for.body9.1 ], [ %incdec.ptr.542, %for.inc.547 ]
- %incdec.ptr.1 = getelementptr inbounds i32*, i32** %i1, i64 -1
+ %i1 = phi ptr [ %incdec.ptr.1, %for.body9.1 ], [ %incdec.ptr.542, %for.inc.547 ]
+ %incdec.ptr.1 = getelementptr inbounds ptr, ptr %i1, i64 -1
br i1 undef, label %for.body9.1, label %for.inc.1
for.inc.1: ; preds = %for.body9.1
br label %for.body9.1.1
for.body9.2: ; preds = %for.inc.1.5, %for.body9.2
- %i2 = phi i32** [ %incdec.ptr.2, %for.body9.2 ], [ %incdec.ptr.1.5, %for.inc.1.5 ]
- %incdec.ptr.2 = getelementptr inbounds i32*, i32** %i2, i64 -1
+ %i2 = phi ptr [ %incdec.ptr.2, %for.body9.2 ], [ %incdec.ptr.1.5, %for.inc.1.5 ]
+ %incdec.ptr.2 = getelementptr inbounds ptr, ptr %i2, i64 -1
br i1 undef, label %for.body9.2, label %for.inc.2
for.inc.2: ; preds = %for.body9.2
br label %for.body9.2.1
for.body9.3: ; preds = %for.inc.2.5, %for.body9.3
- %i3 = phi i32** [ %incdec.ptr.3, %for.body9.3 ], [ %incdec.ptr.2.5, %for.inc.2.5 ]
- %incdec.ptr.3 = getelementptr inbounds i32*, i32** %i3, i64 -1
+ %i3 = phi ptr [ %incdec.ptr.3, %for.body9.3 ], [ %incdec.ptr.2.5, %for.inc.2.5 ]
+ %incdec.ptr.3 = getelementptr inbounds ptr, ptr %i3, i64 -1
br i1 undef, label %for.body9.3, label %for.inc.3
for.inc.3: ; preds = %for.body9.3
br label %for.body9.3.1
for.body9.4: ; preds = %for.inc.3.5, %for.body9.4
- %i4 = phi i32** [ %incdec.ptr.4, %for.body9.4 ], [ %incdec.ptr.3.5, %for.inc.3.5 ]
- %incdec.ptr.4 = getelementptr inbounds i32*, i32** %i4, i64 -1
+ %i4 = phi ptr [ %incdec.ptr.4, %for.body9.4 ], [ %incdec.ptr.3.5, %for.inc.3.5 ]
+ %incdec.ptr.4 = getelementptr inbounds ptr, ptr %i4, i64 -1
br i1 undef, label %for.body9.4, label %for.inc.4
for.inc.4: ; preds = %for.body9.4
br label %for.body9.4.1
for.body9.5: ; preds = %for.inc.4.5, %for.body9.5
- %i5 = phi i32** [ %incdec.ptr.5, %for.body9.5 ], [ %incdec.ptr.4.5, %for.inc.4.5 ]
- %incdec.ptr.5 = getelementptr inbounds i32*, i32** %i5, i64 -1
+ %i5 = phi ptr [ %incdec.ptr.5, %for.body9.5 ], [ %incdec.ptr.4.5, %for.inc.4.5 ]
+ %incdec.ptr.5 = getelementptr inbounds ptr, ptr %i5, i64 -1
br i1 undef, label %for.body9.5, label %for.inc.5
for.inc.5: ; preds = %for.body9.5
br label %for.body9.5.1
for.body9.5.1: ; preds = %for.body9.5.1, %for.inc.5
- %i6 = phi i32** [ %incdec.ptr.5.1, %for.body9.5.1 ], [ %incdec.ptr.5, %for.inc.5 ]
- %incdec.ptr.5.1 = getelementptr inbounds i32*, i32** %i6, i64 -1
+ %i6 = phi ptr [ %incdec.ptr.5.1, %for.body9.5.1 ], [ %incdec.ptr.5, %for.inc.5 ]
+ %incdec.ptr.5.1 = getelementptr inbounds ptr, ptr %i6, i64 -1
br i1 undef, label %for.body9.5.1, label %for.inc.5.1
for.inc.5.1: ; preds = %for.body9.5.1
br label %for.body9.5.2
for.body9.5.2: ; preds = %for.body9.5.2, %for.inc.5.1
- %i7 = phi i32** [ %incdec.ptr.5.2, %for.body9.5.2 ], [ %incdec.ptr.5.1, %for.inc.5.1 ]
- %incdec.ptr.5.2 = getelementptr inbounds i32*, i32** %i7, i64 -1
+ %i7 = phi ptr [ %incdec.ptr.5.2, %for.body9.5.2 ], [ %incdec.ptr.5.1, %for.inc.5.1 ]
+ %incdec.ptr.5.2 = getelementptr inbounds ptr, ptr %i7, i64 -1
br i1 undef, label %for.body9.5.2, label %for.inc.5.2
for.inc.5.2: ; preds = %for.body9.5.2
br label %for.body9.5.3
for.body9.5.3: ; preds = %for.body9.5.3, %for.inc.5.2
- %i8 = phi i32** [ %incdec.ptr.5.3, %for.body9.5.3 ], [ %incdec.ptr.5.2, %for.inc.5.2 ]
- %incdec.ptr.5.3 = getelementptr inbounds i32*, i32** %i8, i64 -1
+ %i8 = phi ptr [ %incdec.ptr.5.3, %for.body9.5.3 ], [ %incdec.ptr.5.2, %for.inc.5.2 ]
+ %incdec.ptr.5.3 = getelementptr inbounds ptr, ptr %i8, i64 -1
br i1 undef, label %for.body9.5.3, label %for.inc.5.3
for.inc.5.3: ; preds = %for.body9.5.3
br label %for.body9.5.4
for.body9.5.4: ; preds = %for.body9.5.4, %for.inc.5.3
- %i9 = phi i32** [ %incdec.ptr.5.4, %for.body9.5.4 ], [ %incdec.ptr.5.3, %for.inc.5.3 ]
- %incdec.ptr.5.4 = getelementptr inbounds i32*, i32** %i9, i64 -1
+ %i9 = phi ptr [ %incdec.ptr.5.4, %for.body9.5.4 ], [ %incdec.ptr.5.3, %for.inc.5.3 ]
+ %incdec.ptr.5.4 = getelementptr inbounds ptr, ptr %i9, i64 -1
br i1 undef, label %for.body9.5.4, label %for.inc.5.4
for.inc.5.4: ; preds = %for.body9.5.4
br label %for.body9.5.5
for.body9.5.5: ; preds = %for.body9.5.5, %for.inc.5.4
- %i10 = phi i32** [ undef, %for.body9.5.5 ], [ %incdec.ptr.5.4, %for.inc.5.4 ]
- %i11 = bitcast i32** %i10 to i64*
- %i12 = load i64, i64* %i11, align 8
+ %i10 = phi ptr [ undef, %for.body9.5.5 ], [ %incdec.ptr.5.4, %for.inc.5.4 ]
+ %i12 = load i64, ptr %i10, align 8
br label %for.body9.5.5
for.body9.4.1: ; preds = %for.body9.4.1, %for.inc.4
- %i13 = phi i32** [ %incdec.ptr.4.1, %for.body9.4.1 ], [ %incdec.ptr.4, %for.inc.4 ]
- %incdec.ptr.4.1 = getelementptr inbounds i32*, i32** %i13, i64 -1
+ %i13 = phi ptr [ %incdec.ptr.4.1, %for.body9.4.1 ], [ %incdec.ptr.4, %for.inc.4 ]
+ %incdec.ptr.4.1 = getelementptr inbounds ptr, ptr %i13, i64 -1
br i1 undef, label %for.body9.4.1, label %for.inc.4.1
for.inc.4.1: ; preds = %for.body9.4.1
br label %for.body9.4.2
for.body9.4.2: ; preds = %for.body9.4.2, %for.inc.4.1
- %i14 = phi i32** [ %incdec.ptr.4.2, %for.body9.4.2 ], [ %incdec.ptr.4.1, %for.inc.4.1 ]
- %incdec.ptr.4.2 = getelementptr inbounds i32*, i32** %i14, i64 -1
+ %i14 = phi ptr [ %incdec.ptr.4.2, %for.body9.4.2 ], [ %incdec.ptr.4.1, %for.inc.4.1 ]
+ %incdec.ptr.4.2 = getelementptr inbounds ptr, ptr %i14, i64 -1
br i1 undef, label %for.body9.4.2, label %for.inc.4.2
for.inc.4.2: ; preds = %for.body9.4.2
br label %for.body9.4.3
for.body9.4.3: ; preds = %for.body9.4.3, %for.inc.4.2
- %i15 = phi i32** [ %incdec.ptr.4.3, %for.body9.4.3 ], [ %incdec.ptr.4.2, %for.inc.4.2 ]
- %incdec.ptr.4.3 = getelementptr inbounds i32*, i32** %i15, i64 -1
+ %i15 = phi ptr [ %incdec.ptr.4.3, %for.body9.4.3 ], [ %incdec.ptr.4.2, %for.inc.4.2 ]
+ %incdec.ptr.4.3 = getelementptr inbounds ptr, ptr %i15, i64 -1
br i1 undef, label %for.body9.4.3, label %for.inc.4.3
for.inc.4.3: ; preds = %for.body9.4.3
br label %for.body9.4.4
for.body9.4.4: ; preds = %for.body9.4.4, %for.inc.4.3
- %i16 = phi i32** [ %incdec.ptr.4.4, %for.body9.4.4 ], [ %incdec.ptr.4.3, %for.inc.4.3 ]
- %incdec.ptr.4.4 = getelementptr inbounds i32*, i32** %i16, i64 -1
+ %i16 = phi ptr [ %incdec.ptr.4.4, %for.body9.4.4 ], [ %incdec.ptr.4.3, %for.inc.4.3 ]
+ %incdec.ptr.4.4 = getelementptr inbounds ptr, ptr %i16, i64 -1
br i1 undef, label %for.body9.4.4, label %for.inc.4.4
for.inc.4.4: ; preds = %for.body9.4.4
br label %for.body9.4.5
for.body9.4.5: ; preds = %for.body9.4.5, %for.inc.4.4
- %i17 = phi i32** [ %incdec.ptr.4.5, %for.body9.4.5 ], [ %incdec.ptr.4.4, %for.inc.4.4 ]
- %incdec.ptr.4.5 = getelementptr inbounds i32*, i32** %i17, i64 -1
+ %i17 = phi ptr [ %incdec.ptr.4.5, %for.body9.4.5 ], [ %incdec.ptr.4.4, %for.inc.4.4 ]
+ %incdec.ptr.4.5 = getelementptr inbounds ptr, ptr %i17, i64 -1
br i1 undef, label %for.body9.4.5, label %for.inc.4.5
for.inc.4.5: ; preds = %for.body9.4.5
br label %for.body9.5
for.body9.3.1: ; preds = %for.body9.3.1, %for.inc.3
- %i18 = phi i32** [ %incdec.ptr.3.1, %for.body9.3.1 ], [ %incdec.ptr.3, %for.inc.3 ]
- %incdec.ptr.3.1 = getelementptr inbounds i32*, i32** %i18, i64 -1
+ %i18 = phi ptr [ %incdec.ptr.3.1, %for.body9.3.1 ], [ %incdec.ptr.3, %for.inc.3 ]
+ %incdec.ptr.3.1 = getelementptr inbounds ptr, ptr %i18, i64 -1
br i1 undef, label %for.body9.3.1, label %for.inc.3.1
for.inc.3.1: ; preds = %for.body9.3.1
br label %for.body9.3.2
for.body9.3.2: ; preds = %for.body9.3.2, %for.inc.3.1
- %i19 = phi i32** [ %incdec.ptr.3.2, %for.body9.3.2 ], [ %incdec.ptr.3.1, %for.inc.3.1 ]
- %incdec.ptr.3.2 = getelementptr inbounds i32*, i32** %i19, i64 -1
+ %i19 = phi ptr [ %incdec.ptr.3.2, %for.body9.3.2 ], [ %incdec.ptr.3.1, %for.inc.3.1 ]
+ %incdec.ptr.3.2 = getelementptr inbounds ptr, ptr %i19, i64 -1
br i1 undef, label %for.body9.3.2, label %for.inc.3.2
for.inc.3.2: ; preds = %for.body9.3.2
br label %for.body9.3.3
for.body9.3.3: ; preds = %for.body9.3.3, %for.inc.3.2
- %i20 = phi i32** [ %incdec.ptr.3.3, %for.body9.3.3 ], [ %incdec.ptr.3.2, %for.inc.3.2 ]
- %incdec.ptr.3.3 = getelementptr inbounds i32*, i32** %i20, i64 -1
+ %i20 = phi ptr [ %incdec.ptr.3.3, %for.body9.3.3 ], [ %incdec.ptr.3.2, %for.inc.3.2 ]
+ %incdec.ptr.3.3 = getelementptr inbounds ptr, ptr %i20, i64 -1
br i1 undef, label %for.body9.3.3, label %for.inc.3.3
for.inc.3.3: ; preds = %for.body9.3.3
br label %for.body9.3.4
for.body9.3.4: ; preds = %for.body9.3.4, %for.inc.3.3
- %i21 = phi i32** [ %incdec.ptr.3.4, %for.body9.3.4 ], [ %incdec.ptr.3.3, %for.inc.3.3 ]
- %incdec.ptr.3.4 = getelementptr inbounds i32*, i32** %i21, i64 -1
+ %i21 = phi ptr [ %incdec.ptr.3.4, %for.body9.3.4 ], [ %incdec.ptr.3.3, %for.inc.3.3 ]
+ %incdec.ptr.3.4 = getelementptr inbounds ptr, ptr %i21, i64 -1
br i1 undef, label %for.body9.3.4, label %for.inc.3.4
for.inc.3.4: ; preds = %for.body9.3.4
br label %for.body9.3.5
for.body9.3.5: ; preds = %for.body9.3.5, %for.inc.3.4
- %i22 = phi i32** [ %incdec.ptr.3.5, %for.body9.3.5 ], [ %incdec.ptr.3.4, %for.inc.3.4 ]
- %incdec.ptr.3.5 = getelementptr inbounds i32*, i32** %i22, i64 -1
+ %i22 = phi ptr [ %incdec.ptr.3.5, %for.body9.3.5 ], [ %incdec.ptr.3.4, %for.inc.3.4 ]
+ %incdec.ptr.3.5 = getelementptr inbounds ptr, ptr %i22, i64 -1
br i1 undef, label %for.body9.3.5, label %for.inc.3.5
for.inc.3.5: ; preds = %for.body9.3.5
br label %for.body9.4
for.body9.2.1: ; preds = %for.body9.2.1, %for.inc.2
- %i23 = phi i32** [ %incdec.ptr.2.1, %for.body9.2.1 ], [ %incdec.ptr.2, %for.inc.2 ]
- %incdec.ptr.2.1 = getelementptr inbounds i32*, i32** %i23, i64 -1
+ %i23 = phi ptr [ %incdec.ptr.2.1, %for.body9.2.1 ], [ %incdec.ptr.2, %for.inc.2 ]
+ %incdec.ptr.2.1 = getelementptr inbounds ptr, ptr %i23, i64 -1
br i1 undef, label %for.body9.2.1, label %for.inc.2.1
for.inc.2.1: ; preds = %for.body9.2.1
br label %for.body9.2.2
for.body9.2.2: ; preds = %for.body9.2.2, %for.inc.2.1
- %i24 = phi i32** [ %incdec.ptr.2.2, %for.body9.2.2 ], [ %incdec.ptr.2.1, %for.inc.2.1 ]
- %incdec.ptr.2.2 = getelementptr inbounds i32*, i32** %i24, i64 -1
+ %i24 = phi ptr [ %incdec.ptr.2.2, %for.body9.2.2 ], [ %incdec.ptr.2.1, %for.inc.2.1 ]
+ %incdec.ptr.2.2 = getelementptr inbounds ptr, ptr %i24, i64 -1
br i1 undef, label %for.body9.2.2, label %for.inc.2.2
for.inc.2.2: ; preds = %for.body9.2.2
br label %for.body9.2.3
for.body9.2.3: ; preds = %for.body9.2.3, %for.inc.2.2
- %i25 = phi i32** [ %incdec.ptr.2.3, %for.body9.2.3 ], [ %incdec.ptr.2.2, %for.inc.2.2 ]
- %incdec.ptr.2.3 = getelementptr inbounds i32*, i32** %i25, i64 -1
+ %i25 = phi ptr [ %incdec.ptr.2.3, %for.body9.2.3 ], [ %incdec.ptr.2.2, %for.inc.2.2 ]
+ %incdec.ptr.2.3 = getelementptr inbounds ptr, ptr %i25, i64 -1
br i1 undef, label %for.body9.2.3, label %for.inc.2.3
for.inc.2.3: ; preds = %for.body9.2.3
br label %for.body9.2.4
for.body9.2.4: ; preds = %for.body9.2.4, %for.inc.2.3
- %i26 = phi i32** [ %incdec.ptr.2.4, %for.body9.2.4 ], [ %incdec.ptr.2.3, %for.inc.2.3 ]
- %incdec.ptr.2.4 = getelementptr inbounds i32*, i32** %i26, i64 -1
+ %i26 = phi ptr [ %incdec.ptr.2.4, %for.body9.2.4 ], [ %incdec.ptr.2.3, %for.inc.2.3 ]
+ %incdec.ptr.2.4 = getelementptr inbounds ptr, ptr %i26, i64 -1
br i1 undef, label %for.body9.2.4, label %for.inc.2.4
for.inc.2.4: ; preds = %for.body9.2.4
br label %for.body9.2.5
for.body9.2.5: ; preds = %for.body9.2.5, %for.inc.2.4
- %i27 = phi i32** [ %incdec.ptr.2.5, %for.body9.2.5 ], [ %incdec.ptr.2.4, %for.inc.2.4 ]
- %incdec.ptr.2.5 = getelementptr inbounds i32*, i32** %i27, i64 -1
+ %i27 = phi ptr [ %incdec.ptr.2.5, %for.body9.2.5 ], [ %incdec.ptr.2.4, %for.inc.2.4 ]
+ %incdec.ptr.2.5 = getelementptr inbounds ptr, ptr %i27, i64 -1
br i1 undef, label %for.body9.2.5, label %for.inc.2.5
for.inc.2.5: ; preds = %for.body9.2.5
br label %for.body9.3
for.body9.1.1: ; preds = %for.body9.1.1, %for.inc.1
- %i28 = phi i32** [ %incdec.ptr.1.1, %for.body9.1.1 ], [ %incdec.ptr.1, %for.inc.1 ]
- %incdec.ptr.1.1 = getelementptr inbounds i32*, i32** %i28, i64 -1
+ %i28 = phi ptr [ %incdec.ptr.1.1, %for.body9.1.1 ], [ %incdec.ptr.1, %for.inc.1 ]
+ %incdec.ptr.1.1 = getelementptr inbounds ptr, ptr %i28, i64 -1
br i1 undef, label %for.body9.1.1, label %for.inc.1.1
for.inc.1.1: ; preds = %for.body9.1.1
br label %for.body9.1.2
for.body9.1.2: ; preds = %for.body9.1.2, %for.inc.1.1
- %i29 = phi i32** [ %incdec.ptr.1.2, %for.body9.1.2 ], [ %incdec.ptr.1.1, %for.inc.1.1 ]
- %incdec.ptr.1.2 = getelementptr inbounds i32*, i32** %i29, i64 -1
+ %i29 = phi ptr [ %incdec.ptr.1.2, %for.body9.1.2 ], [ %incdec.ptr.1.1, %for.inc.1.1 ]
+ %incdec.ptr.1.2 = getelementptr inbounds ptr, ptr %i29, i64 -1
br i1 undef, label %for.body9.1.2, label %for.inc.1.2
for.inc.1.2: ; preds = %for.body9.1.2
br label %for.body9.1.3
for.body9.1.3: ; preds = %for.body9.1.3, %for.inc.1.2
- %i30 = phi i32** [ %incdec.ptr.1.3, %for.body9.1.3 ], [ %incdec.ptr.1.2, %for.inc.1.2 ]
- %incdec.ptr.1.3 = getelementptr inbounds i32*, i32** %i30, i64 -1
+ %i30 = phi ptr [ %incdec.ptr.1.3, %for.body9.1.3 ], [ %incdec.ptr.1.2, %for.inc.1.2 ]
+ %incdec.ptr.1.3 = getelementptr inbounds ptr, ptr %i30, i64 -1
br i1 undef, label %for.body9.1.3, label %for.inc.1.3
for.inc.1.3: ; preds = %for.body9.1.3
br label %for.body9.1.4
for.body9.1.4: ; preds = %for.body9.1.4, %for.inc.1.3
- %i31 = phi i32** [ %incdec.ptr.1.4, %for.body9.1.4 ], [ %incdec.ptr.1.3, %for.inc.1.3 ]
- %incdec.ptr.1.4 = getelementptr inbounds i32*, i32** %i31, i64 -1
+ %i31 = phi ptr [ %incdec.ptr.1.4, %for.body9.1.4 ], [ %incdec.ptr.1.3, %for.inc.1.3 ]
+ %incdec.ptr.1.4 = getelementptr inbounds ptr, ptr %i31, i64 -1
br i1 undef, label %for.body9.1.4, label %for.inc.1.4
for.inc.1.4: ; preds = %for.body9.1.4
br label %for.body9.1.5
for.body9.1.5: ; preds = %for.body9.1.5, %for.inc.1.4
- %i32 = phi i32** [ %incdec.ptr.1.5, %for.body9.1.5 ], [ %incdec.ptr.1.4, %for.inc.1.4 ]
- %incdec.ptr.1.5 = getelementptr inbounds i32*, i32** %i32, i64 -1
+ %i32 = phi ptr [ %incdec.ptr.1.5, %for.body9.1.5 ], [ %incdec.ptr.1.4, %for.inc.1.4 ]
+ %incdec.ptr.1.5 = getelementptr inbounds ptr, ptr %i32, i64 -1
br i1 undef, label %for.body9.1.5, label %for.inc.1.5
for.inc.1.5: ; preds = %for.body9.1.5
br label %for.body9.2
for.body9.118: ; preds = %for.body9.118, %for.inc
- %i33 = phi i32** [ %incdec.ptr, %for.inc ], [ %incdec.ptr.114, %for.body9.118 ]
- %incdec.ptr.114 = getelementptr inbounds i32*, i32** %i33, i64 -1
+ %i33 = phi ptr [ %incdec.ptr, %for.inc ], [ %incdec.ptr.114, %for.body9.118 ]
+ %incdec.ptr.114 = getelementptr inbounds ptr, ptr %i33, i64 -1
br i1 undef, label %for.body9.118, label %for.inc.119
for.inc.119: ; preds = %for.body9.118
br label %for.body9.225
for.body9.225: ; preds = %for.body9.225, %for.inc.119
- %i34 = phi i32** [ %incdec.ptr.114, %for.inc.119 ], [ %incdec.ptr.221, %for.body9.225 ]
- %incdec.ptr.221 = getelementptr inbounds i32*, i32** %i34, i64 -1
- %i35 = bitcast i32** %i34 to i64*
- %i36 = load i64, i64* %i35, align 8
+ %i34 = phi ptr [ %incdec.ptr.114, %for.inc.119 ], [ %incdec.ptr.221, %for.body9.225 ]
+ %incdec.ptr.221 = getelementptr inbounds ptr, ptr %i34, i64 -1
+ %i36 = load i64, ptr %i34, align 8
br i1 undef, label %for.body9.225, label %for.inc.226
for.inc.226: ; preds = %for.body9.225
br label %for.body9.332
for.body9.332: ; preds = %for.body9.332, %for.inc.226
- %i37 = phi i32** [ %incdec.ptr.221, %for.inc.226 ], [ %incdec.ptr.328, %for.body9.332 ]
- %incdec.ptr.328 = getelementptr inbounds i32*, i32** %i37, i64 -1
+ %i37 = phi ptr [ %incdec.ptr.221, %for.inc.226 ], [ %incdec.ptr.328, %for.body9.332 ]
+ %incdec.ptr.328 = getelementptr inbounds ptr, ptr %i37, i64 -1
br i1 undef, label %for.body9.332, label %for.inc.333
for.inc.333: ; preds = %for.body9.332
br label %for.body9.439
for.body9.439: ; preds = %for.body9.439, %for.inc.333
- %i38 = phi i32** [ %incdec.ptr.328, %for.inc.333 ], [ %incdec.ptr.435, %for.body9.439 ]
- %incdec.ptr.435 = getelementptr inbounds i32*, i32** %i38, i64 -1
+ %i38 = phi ptr [ %incdec.ptr.328, %for.inc.333 ], [ %incdec.ptr.435, %for.body9.439 ]
+ %incdec.ptr.435 = getelementptr inbounds ptr, ptr %i38, i64 -1
br i1 undef, label %for.body9.439, label %for.inc.440
for.inc.440: ; preds = %for.body9.439
br label %for.body9.546
for.body9.546: ; preds = %for.body9.546, %for.inc.440
- %i39 = phi i32** [ %incdec.ptr.435, %for.inc.440 ], [ %incdec.ptr.542, %for.body9.546 ]
- %incdec.ptr.542 = getelementptr inbounds i32*, i32** %i39, i64 -1
+ %i39 = phi ptr [ %incdec.ptr.435, %for.inc.440 ], [ %incdec.ptr.542, %for.body9.546 ]
+ %incdec.ptr.542 = getelementptr inbounds ptr, ptr %i39, i64 -1
br i1 undef, label %for.body9.546, label %for.inc.547
for.inc.547: ; preds = %for.body9.546
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-ios"
-define void @do_integer_add(i64 %iterations, i8* nocapture readonly %cookie) {
+define void @do_integer_add(i64 %iterations, ptr nocapture readonly %cookie) {
entry:
- %N = bitcast i8* %cookie to i32*
- %0 = load i32, i32* %N, align 4
+ %0 = load i32, ptr %cookie, align 4
%add = add nsw i32 %0, 57
%cmp56 = icmp eq i64 %iterations, 0
br i1 %cmp56, label %while.end, label %for.cond.preheader.preheader
; }
; return -7;
; }
-define float @test1(float* nocapture readonly %arr, i64 %start, float %threshold) {
+define float @test1(ptr nocapture readonly %arr, i64 %start, float %threshold) {
; CHECK-LABEL: test1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cbz x1, .LBB0_4
for.body: ; preds = %entry, %for.cond
%i.012 = phi i64 [ %inc, %for.cond ], [ %start, %entry ]
%add = add nsw i64 %i.012, 7
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %add
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %add
+ %0 = load float, ptr %arrayidx, align 4
%cmp1 = fcmp ogt float %0, %threshold
%inc = add nsw i64 %i.012, 1
br i1 %cmp1, label %cleanup2, label %for.cond
; Same as test1, except i has another use:
; if (x > threshold) ---> if (x > threshold + i)
-define float @test2(float* nocapture readonly %arr, i64 %start, float %threshold) {
+define float @test2(ptr nocapture readonly %arr, i64 %start, float %threshold) {
; CHECK-LABEL: test2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cbz x1, .LBB1_4
for.body: ; preds = %entry, %for.cond
%i.015 = phi i64 [ %inc, %for.cond ], [ %start, %entry ]
%add = add nsw i64 %i.015, 7
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %add
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %add
+ %0 = load float, ptr %arrayidx, align 4
%conv = sitofp i64 %i.015 to float
%add1 = fadd float %conv, %threshold
%cmp2 = fcmp ogt float %0, %add1
; FIXME: Handle VectorType in SCEVExpander::expandAddToGEP.
; The generated IR is not ideal: the base 'scalar_vector' is cast to i8*, and an ugly getelementptr is emitted over the casted base.
; CHECK: uglygep
-define void @test(i32* %a, i32 %v, i64 %n) {
+define void @test(ptr %a, i32 %v, i64 %n) {
entry:
%scalar_vector = alloca <vscale x 4 x i32>, align 16
%num_elm = call i64 @llvm.aarch64.sve.cntw(i32 31)
loop_header:
%indvar = phi i64 [ 0, %entry ], [ %indvar_next, %for_loop ]
- %gep_a_0 = getelementptr i32, i32* %a, i64 0
- %gep_vec_0 = getelementptr inbounds <vscale x 4 x i32>, <vscale x 4 x i32>* %scalar_vector, i64 0, i64 0
br label %scalar_loop
scalar_loop:
- %gep_vec = phi i32* [ %gep_vec_0, %loop_header ], [ %gep_vec_inc, %scalar_loop ]
+ %gep_vec = phi ptr [ %scalar_vector, %loop_header ], [ %gep_vec_inc, %scalar_loop ]
%scalar_iv = phi i64 [ 0, %loop_header ], [ %scalar_iv_next, %scalar_loop ]
- store i32 %v, i32* %gep_vec, align 4
+ store i32 %v, ptr %gep_vec, align 4
%scalar_iv_next = add i64 %scalar_iv, 1
- %gep_vec_inc = getelementptr i32, i32* %gep_vec, i64 1
+ %gep_vec_inc = getelementptr i32, ptr %gep_vec, i64 1
%scalar_exit = icmp eq i64 %scalar_iv_next, %scalar_count
br i1 %scalar_exit, label %for_loop, label %scalar_loop
for_loop:
- %vector = load <vscale x 4 x i32>, <vscale x 4 x i32>* %scalar_vector, align 16
- %gep_a = getelementptr i32, i32* %gep_a_0, i64 %indvar
- %vector_ptr = bitcast i32* %gep_a to <vscale x 4 x i32>*
- call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %vector, <vscale x 4 x i32>* %vector_ptr, i32 4, <vscale x 4 x i1> undef)
+ %vector = load <vscale x 4 x i32>, ptr %scalar_vector, align 16
+ %gep_a = getelementptr i32, ptr %a, i64 %indvar
+ call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %vector, ptr %gep_a, i32 4, <vscale x 4 x i1> undef)
%indvar_next = add nsw i64 %indvar, %scalar_count
%exit_cond = icmp eq i64 %indvar_next, %n
br i1 %exit_cond, label %exit, label %loop_header
declare i64 @llvm.aarch64.sve.cntw(i32 immarg)
-declare void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, i32 immarg, <vscale x 4 x i1>)
loop:
%idx0 = phi i32 [ %next_idx0, %loop ], [ 0, %entry ]
- %0 = getelementptr inbounds i32, i32 addrspace(5)* null, i32 %idx0
- %1 = getelementptr inbounds i32, i32 addrspace(1)* null, i32 %idx0
- store i32 1, i32 addrspace(5)* %0
- store i32 7, i32 addrspace(1)* %1
+ %0 = getelementptr inbounds i32, ptr addrspace(5) null, i32 %idx0
+ %1 = getelementptr inbounds i32, ptr addrspace(1) null, i32 %idx0
+ store i32 1, ptr addrspace(5) %0
+ store i32 7, ptr addrspace(1) %1
%next_idx0 = add nuw nsw i32 %idx0, 1
br label %loop
}
; CHECK-NEXT: [[C0:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[C0]], label [[BB13:%.*]], label [[BB]]
; CHECK: bb:
-; CHECK-NEXT: [[T:%.*]] = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* undef, align 4
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8 addrspace(3)* [[T]], i32 [[LSR_IV_NEXT2]]
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 addrspace(3)* [[SCEVGEP]], null
+; CHECK-NEXT: [[T:%.*]] = load ptr addrspace(3), ptr addrspace(3) undef, align 4
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(3) [[T]], i32 [[LSR_IV_NEXT2]]
+; CHECK-NEXT: [[C1:%.*]] = icmp ne ptr addrspace(3) [[SCEVGEP]], null
; CHECK-NEXT: br i1 [[C1]], label [[BB11]], label [[BB13]]
; CHECK: bb13:
; CHECK-NEXT: unreachable
br i1 %c0, label %bb13, label %bb
bb: ; preds = %bb11
- %t = load i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* undef, align 4
- %p = getelementptr i8, i8 addrspace(3)* %t, i32 %ii
- %c1 = icmp ne i8 addrspace(3)* %p, null
+ %t = load ptr addrspace(3), ptr addrspace(3) undef, align 4
+ %p = getelementptr i8, ptr addrspace(3) %t, i32 %ii
+ %c1 = icmp ne ptr addrspace(3) %p, null
%i.next = add i32 %i, 1
br i1 %c1, label %bb11, label %bb13
; CHECK-NEXT: [[C0:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[C0]], label [[BB13:%.*]], label [[BB]]
; CHECK: bb:
-; CHECK-NEXT: [[T:%.*]] = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8 addrspace(1)* [[T]], i64 [[LSR_IV_NEXT2]]
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 addrspace(1)* [[SCEVGEP]], null
+; CHECK-NEXT: [[T:%.*]] = load ptr addrspace(1), ptr addrspace(1) undef, align 8
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(1) [[T]], i64 [[LSR_IV_NEXT2]]
+; CHECK-NEXT: [[C1:%.*]] = icmp ne ptr addrspace(1) [[SCEVGEP]], null
; CHECK-NEXT: br i1 [[C1]], label [[BB11]], label [[BB13]]
; CHECK: bb13:
; CHECK-NEXT: unreachable
br i1 %c0, label %bb13, label %bb
bb: ; preds = %bb11
- %t = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
- %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii
- %c1 = icmp ne i8 addrspace(1)* %p, null
+ %t = load ptr addrspace(1), ptr addrspace(1) undef, align 8
+ %p = getelementptr i8, ptr addrspace(1) %t, i64 %ii
+ %c1 = icmp ne ptr addrspace(1) %p, null
%i.next = add i64 %i, 1
br i1 %c1, label %bb11, label %bb13
; CHECK-NEXT: [[C0:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[C0]], label [[BB13:%.*]], label [[BB]]
; CHECK: bb:
-; CHECK-NEXT: [[T:%.*]] = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
+; CHECK-NEXT: [[T:%.*]] = load ptr addrspace(1), ptr addrspace(1) undef, align 8
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[LSR_IV1]] to i64
-; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8 addrspace(1)* [[T]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 addrspace(1)* [[P]], null
+; CHECK-NEXT: [[P:%.*]] = getelementptr i8, ptr addrspace(1) [[T]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[C1:%.*]] = icmp ne ptr addrspace(1) [[P]], null
; CHECK-NEXT: br i1 [[C1]], label [[BB11]], label [[BB13]]
; CHECK: bb13:
; CHECK-NEXT: unreachable
br i1 %c0, label %bb13, label %bb
bb: ; preds = %bb11
- %t = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
+ %t = load ptr addrspace(1), ptr addrspace(1) undef, align 8
%idxprom = sext i32 %ii to i64
- %p = getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
- %c1 = icmp ne i8 addrspace(1)* %p, null
+ %p = getelementptr i8, ptr addrspace(1) %t, i64 %idxprom
+ %c1 = icmp ne ptr addrspace(1) %p, null
%i.next = add i32 %i, 1
br i1 %c1, label %bb11, label %bb13
; CHECK-NEXT: [[C0:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[C0]], label [[BB13:%.*]], label [[BB]]
; CHECK: bb:
-; CHECK-NEXT: [[T:%.*]] = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
-; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8 addrspace(1)* [[T]], i64 [[II_EXT]]
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 addrspace(1)* [[P]], null
+; CHECK-NEXT: [[T:%.*]] = load ptr addrspace(1), ptr addrspace(1) undef, align 8
+; CHECK-NEXT: [[P:%.*]] = getelementptr i8, ptr addrspace(1) [[T]], i64 [[II_EXT]]
+; CHECK-NEXT: [[C1:%.*]] = icmp ne ptr addrspace(1) [[P]], null
; CHECK-NEXT: br i1 [[C1]], label [[BB11]], label [[BB13]]
; CHECK: bb13:
; CHECK-NEXT: unreachable
br i1 %c0, label %bb13, label %bb
bb: ; preds = %bb11
- %t = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
- %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii.ext
- %c1 = icmp ne i8 addrspace(1)* %p, null
+ %t = load ptr addrspace(1), ptr addrspace(1) undef, align 8
+ %p = getelementptr i8, ptr addrspace(1) %t, i64 %ii.ext
+ %c1 = icmp ne ptr addrspace(1) %p, null
%i.next = add i32 %i, 1
br i1 %c1, label %bb11, label %bb13
for.body.i: ; preds = %for.body.i, %for.body
%ij = phi i32 [ 0, %for.body ], [ %inc14, %for.body.i ]
- %tmp = load i32, i32 addrspace(5)* undef, align 4
+ %tmp = load i32, ptr addrspace(5) undef, align 4
%inc13 = or i32 %ij, 2
%shl = shl i32 1, 0
%and = and i32 %shl, %tmp
%tobool = icmp eq i32 %and, 0
%add = mul nuw nsw i32 %inc13, 5
%tmp1 = zext i32 %add to i64
- %arrayidx8 = getelementptr inbounds [32 x [800 x i32]], [32 x [800 x i32]] addrspace(4)* @array, i64 0, i64 undef, i64 %tmp1
- %tmp2 = load i32, i32 addrspace(4)* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [32 x [800 x i32]], ptr addrspace(4) @array, i64 0, i64 undef, i64 %tmp1
+ %tmp2 = load i32, ptr addrspace(4) %arrayidx8, align 4
%and9 = select i1 %tobool, i32 0, i32 %tmp2
%xor = xor i32 undef, %and9
%inc1 = or i32 %ij, 3
%add2 = mul nuw nsw i32 %inc1, 5
%add6 = add nuw nsw i32 %add2, 1
%tmp3 = zext i32 %add6 to i64
- %arrayidx9 = getelementptr inbounds [32 x [800 x i32]], [32 x [800 x i32]] addrspace(4)* @array, i64 0, i64 undef, i64 %tmp3
- %tmp4 = bitcast i32 addrspace(4)* %arrayidx9 to <4 x i32> addrspace(4)*
- %tmp5 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp4, align 4
+ %arrayidx9 = getelementptr inbounds [32 x [800 x i32]], ptr addrspace(4) @array, i64 0, i64 undef, i64 %tmp3
+ %tmp5 = load <4 x i32>, ptr addrspace(4) %arrayidx9, align 4
%reorder_shuffle2 = shufflevector <4 x i32> %tmp5, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%tmp6 = select <4 x i1> undef, <4 x i32> zeroinitializer, <4 x i32> %reorder_shuffle2
%inc14 = add nuw nsw i32 %ij, 4
for.body.i: ; preds = %for.body.i, %for.body
%ij = phi i32 [ 0, %for.body ], [ %inc14, %for.body.i ]
- %tmp = load i32, i32 addrspace(5)* undef, align 4
+ %tmp = load i32, ptr addrspace(5) undef, align 4
%inc13 = or i32 %ij, 2
%shl = shl i32 1, 0
%and = and i32 %shl, %tmp
%tobool = icmp eq i32 %and, 0
%add = mul nuw nsw i32 %inc13, 5
%tmp1 = zext i32 %add to i64
- %arrayidx8 = getelementptr inbounds [32 x [800 x i32]], [32 x [800 x i32]] addrspace(4)* @array, i64 0, i64 undef, i64 %tmp1
- %tmp2 = load i32, i32 addrspace(4)* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [32 x [800 x i32]], ptr addrspace(4) @array, i64 0, i64 undef, i64 %tmp1
+ %tmp2 = load i32, ptr addrspace(4) %arrayidx8, align 4
%and9 = select i1 %tobool, i32 0, i32 %tmp2
%xor = xor i32 undef, %and9
%inc1 = or i32 %ij, 3
%add2 = mul nuw nsw i32 %inc1, 5
%add6 = add nuw nsw i32 %add2, 1
%tmp3 = zext i32 %add6 to i64
- %arrayidx9 = getelementptr inbounds [32 x [800 x i32]], [32 x [800 x i32]] addrspace(4)* @array, i64 0, i64 undef, i64 %tmp3
- %tmp4 = bitcast i32 addrspace(4)* %arrayidx9 to <4 x i32> addrspace(4)*
- %tmp5 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp4, align 4
+ %arrayidx9 = getelementptr inbounds [32 x [800 x i32]], ptr addrspace(4) @array, i64 0, i64 undef, i64 %tmp3
+ %tmp5 = load <4 x i32>, ptr addrspace(4) %arrayidx9, align 4
%reorder_shuffle2 = shufflevector <4 x i32> %tmp5, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%tmp6 = select <4 x i1> undef, <4 x i32> zeroinitializer, <4 x i32> %reorder_shuffle2
%inc14 = add nuw nsw i32 %ij, 4
; reg(%v3)
-%s = type { i32* }
+%s = type { ptr }
@ncol = external global i32, align 4
-declare i32* @getptr() nounwind
-declare %s* @getstruct() nounwind
+declare ptr @getptr() nounwind
+declare ptr @getstruct() nounwind
; CHECK: @main
; Check that the loop preheader contains no address computation.
; CHECK: ldr{{.*}}lsl #2
define i32 @main() nounwind ssp {
entry:
- %v0 = load i32, i32* @ncol, align 4
- %v1 = tail call i32* @getptr() nounwind
+ %v0 = load i32, ptr @ncol, align 4
+ %v1 = tail call ptr @getptr() nounwind
%cmp10.i = icmp eq i32 %v0, 0
br label %while.cond.outer
while.cond.outer:
- %call18 = tail call %s* @getstruct() nounwind
+ %call18 = tail call ptr @getstruct() nounwind
br label %while.cond
while.cond:
- %cmp20 = icmp eq i32* %v1, null
+ %cmp20 = icmp eq ptr %v1, null
br label %while.body
while.body:
- %v3 = load i32, i32* @ncol, align 4
+ %v3 = load i32, ptr @ncol, align 4
br label %end_of_chain
end_of_chain:
- %state.i = getelementptr inbounds %s, %s* %call18, i32 0, i32 0
- %v4 = load i32*, i32** %state.i, align 4
+ %v4 = load ptr, ptr %call18, align 4
br label %while.cond.i.i
while.cond.i.i:
br i1 %tobool.i.i, label %where.exit, label %land.rhs.i.i
land.rhs.i.i:
- %arrayidx.i.i = getelementptr inbounds i32, i32* %v4, i32 %dec.i.i
- %v5 = load i32, i32* %arrayidx.i.i, align 4
- %arrayidx1.i.i = getelementptr inbounds i32, i32* %v1, i32 %dec.i.i
- %v6 = load i32, i32* %arrayidx1.i.i, align 4
+ %arrayidx.i.i = getelementptr inbounds i32, ptr %v4, i32 %dec.i.i
+ %v5 = load i32, ptr %arrayidx.i.i, align 4
+ %arrayidx1.i.i = getelementptr inbounds i32, ptr %v1, i32 %dec.i.i
+ %v6 = load i32, ptr %arrayidx1.i.i, align 4
%cmp.i.i = icmp eq i32 %v5, %v6
br i1 %cmp.i.i, label %while.cond.i.i, label %equal_data.exit.i
bb1:
%mul.0 = mul i32 %c.0, %c.0
- %gelptr.0 = getelementptr inbounds i16, i16* undef, i32 %mul.0
+ %gelptr.0 = getelementptr inbounds i16, ptr undef, i32 %mul.0
br label %loop1
loop1:
bb3:
%add.0 = add i32 undef, %mul.1
- %gelptr.1 = getelementptr inbounds i16, i16* %gelptr.0, i32 %add.0
- store i16 undef, i16* %gelptr.1, align 2
+ %gelptr.1 = getelementptr inbounds i16, ptr %gelptr.0, i32 %add.0
+ store i16 undef, ptr %gelptr.1, align 2
br label %bb4
bb4:
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m-arm-none-eabi"
-define float @vctp8(float* %0, i32 %1) {
+define float @vctp8(ptr %0, i32 %1) {
; CHECK-LABEL: @vctp8(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> poison, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> poison, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctp16(float* %0, i32 %1) {
+define float @vctp16(ptr %0, i32 %1) {
; CHECK-LABEL: @vctp16(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> poison, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> poison, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctpi32(float* %0, i32 %1) {
+define float @vctpi32(ptr %0, i32 %1) {
; CHECK-LABEL: @vctpi32(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> poison, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> poison, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctpi64(float* %0, i32 %1) {
+define float @vctpi64(ptr %0, i32 %1) {
; CHECK-LABEL: @vctpi64(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP23]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP22]], label [[TMP11]], label [[TMP24:%.*]]
; CHECK: 24:
-; CHECK-NEXT: [[TMP25:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP21]])
+; CHECK-NEXT: [[TMP25:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP21]])
; CHECK-NEXT: [[TMP26:%.*]] = sitofp i32 [[TMP25]] to float
; CHECK-NEXT: [[TMP27:%.*]] = tail call float @llvm.fabs.f32(float [[TMP26]])
; CHECK-NEXT: ret float [[TMP27]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> poison, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> poison, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m-arm-none-eabi"
-define float @vctp8(float* %0, i32 %1) {
+define float @vctp8(ptr %0, i32 %1) {
; CHECK-LABEL: @vctp8(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> undef, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> undef, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctp16(float* %0, i32 %1) {
+define float @vctp16(ptr %0, i32 %1) {
; CHECK-LABEL: @vctp16(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> undef, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> undef, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctpi32(float* %0, i32 %1) {
+define float @vctpi32(ptr %0, i32 %1) {
; CHECK-LABEL: @vctpi32(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP20]], label [[TMP11]], label [[TMP22:%.*]]
; CHECK: 22:
-; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP19]])
+; CHECK-NEXT: [[TMP23:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP19]])
; CHECK-NEXT: [[TMP24:%.*]] = sitofp i32 [[TMP23]] to float
; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fabs.f32(float [[TMP24]])
; CHECK-NEXT: ret float [[TMP25]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> undef, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> undef, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
}
-define float @vctpi64(float* %0, i32 %1) {
+define float @vctpi64(ptr %0, i32 %1) {
; CHECK-LABEL: @vctpi64(
; CHECK-NEXT: [[TMP3:%.*]] = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP1:%.*]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint float* [[TMP0:%.*]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[TMP0:%.*]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 -32, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP8]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP23]] = add i32 [[TMP12]], -4
; CHECK-NEXT: br i1 [[TMP22]], label [[TMP11]], label [[TMP24:%.*]]
; CHECK: 24:
-; CHECK-NEXT: [[TMP25:%.*]] = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> [[TMP21]])
+; CHECK-NEXT: [[TMP25:%.*]] = tail call i32 @vecAddAcrossF32Mve(<4 x float> [[TMP21]])
; CHECK-NEXT: [[TMP26:%.*]] = sitofp i32 [[TMP25]] to float
; CHECK-NEXT: [[TMP27:%.*]] = tail call float @llvm.fabs.f32(float [[TMP26]])
; CHECK-NEXT: ret float [[TMP27]]
%3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
%4 = extractvalue { <4 x i32>, i32 } %3, 0
%5 = add nsw i32 %1, -1
- %6 = ptrtoint float* %0 to i32
+ %6 = ptrtoint ptr %0 to i32
%7 = insertelement <4 x i32> undef, i32 %6, i32 0
%8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
%9 = shufflevector <4 x i32> %8, <4 x i32> undef, <4 x i32> zeroinitializer
br i1 %21, label %11, label %22
22: ; preds = %11
- %23 = tail call i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+ %23 = tail call i32 @vecAddAcrossF32Mve(<4 x float> %19)
%24 = sitofp i32 %23 to float
%25 = tail call float @llvm.fabs.f32(float %24)
ret float %25
declare void @_Z3usei(i32)
!nvvm.annotations = !{!0}
-!0 = !{void (i64, i64, i64)* @trunc_is_free, !"kernel", i32 1}
+!0 = !{ptr @trunc_is_free, !"kernel", i32 1}
; CHECK: %lsr.iv
; CHECK-NOT: %dummyout
; CHECK: ret
-define i64 @test(i64 %count, float* nocapture %srcrow, i32* nocapture %destrow) nounwind uwtable ssp {
+define i64 @test(i64 %count, ptr nocapture %srcrow, ptr nocapture %destrow) nounwind uwtable ssp {
entry:
%cmp34 = icmp eq i64 %count, 0
br i1 %cmp34, label %for.end29, label %for.body
for.body: ; preds = %entry, %for.body
%dummyiv = phi i64 [ %dummycnt, %for.body ], [ 0, %entry ]
%indvars.iv39 = phi i64 [ %indvars.iv.next40, %for.body ], [ 0, %entry ]
- %dp.036 = phi i32* [ %add.ptr, %for.body ], [ %destrow, %entry ]
- %p.035 = phi float* [ %incdec.ptr4, %for.body ], [ %srcrow, %entry ]
- %incdec.ptr = getelementptr inbounds float, float* %p.035, i64 1
- %0 = load float, float* %incdec.ptr, align 4
- %incdec.ptr2 = getelementptr inbounds float, float* %p.035, i64 2
- %1 = load float, float* %incdec.ptr2, align 4
- %incdec.ptr3 = getelementptr inbounds float, float* %p.035, i64 3
- %2 = load float, float* %incdec.ptr3, align 4
- %incdec.ptr4 = getelementptr inbounds float, float* %p.035, i64 4
- %3 = load float, float* %incdec.ptr4, align 4
- %4 = load i32, i32* %dp.036, align 4
+ %dp.036 = phi ptr [ %add.ptr, %for.body ], [ %destrow, %entry ]
+ %p.035 = phi ptr [ %incdec.ptr4, %for.body ], [ %srcrow, %entry ]
+ %incdec.ptr = getelementptr inbounds float, ptr %p.035, i64 1
+ %0 = load float, ptr %incdec.ptr, align 4
+ %incdec.ptr2 = getelementptr inbounds float, ptr %p.035, i64 2
+ %1 = load float, ptr %incdec.ptr2, align 4
+ %incdec.ptr3 = getelementptr inbounds float, ptr %p.035, i64 3
+ %2 = load float, ptr %incdec.ptr3, align 4
+ %incdec.ptr4 = getelementptr inbounds float, ptr %p.035, i64 4
+ %3 = load float, ptr %incdec.ptr4, align 4
+ %4 = load i32, ptr %dp.036, align 4
%conv5 = fptoui float %0 to i32
%or = or i32 %4, %conv5
- %arrayidx6 = getelementptr inbounds i32, i32* %dp.036, i64 1
- %5 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %dp.036, i64 1
+ %5 = load i32, ptr %arrayidx6, align 4
%conv7 = fptoui float %1 to i32
%or8 = or i32 %5, %conv7
- %arrayidx9 = getelementptr inbounds i32, i32* %dp.036, i64 2
- %6 = load i32, i32* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds i32, ptr %dp.036, i64 2
+ %6 = load i32, ptr %arrayidx9, align 4
%conv10 = fptoui float %2 to i32
%or11 = or i32 %6, %conv10
- %arrayidx12 = getelementptr inbounds i32, i32* %dp.036, i64 3
- %7 = load i32, i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds i32, ptr %dp.036, i64 3
+ %7 = load i32, ptr %arrayidx12, align 4
%conv13 = fptoui float %3 to i32
%or14 = or i32 %7, %conv13
- store i32 %or, i32* %dp.036, align 4
- store i32 %or8, i32* %arrayidx6, align 4
- store i32 %or11, i32* %arrayidx9, align 4
- store i32 %or14, i32* %arrayidx12, align 4
- %add.ptr = getelementptr inbounds i32, i32* %dp.036, i64 4
+ store i32 %or, ptr %dp.036, align 4
+ store i32 %or8, ptr %arrayidx6, align 4
+ store i32 %or11, ptr %arrayidx9, align 4
+ store i32 %or14, ptr %arrayidx12, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %dp.036, i64 4
%indvars.iv.next40 = add i64 %indvars.iv39, 4
%dummycnt = add i64 %dummyiv, 1
%cmp = icmp ult i64 %indvars.iv.next40, %count
for.body23: ; preds = %for.body23, %for.body23.lr.ph
%indvars.iv = phi i64 [ 0, %for.body23.lr.ph ], [ %indvars.iv.next, %for.body23 ]
- %dp.132 = phi i32* [ %add.ptr, %for.body23.lr.ph ], [ %incdec.ptr28, %for.body23 ]
- %p.131 = phi float* [ %incdec.ptr4, %for.body23.lr.ph ], [ %incdec.ptr24, %for.body23 ]
- %incdec.ptr24 = getelementptr inbounds float, float* %p.131, i64 1
- %9 = load float, float* %incdec.ptr24, align 4
- %10 = load i32, i32* %dp.132, align 4
+ %dp.132 = phi ptr [ %add.ptr, %for.body23.lr.ph ], [ %incdec.ptr28, %for.body23 ]
+ %p.131 = phi ptr [ %incdec.ptr4, %for.body23.lr.ph ], [ %incdec.ptr24, %for.body23 ]
+ %incdec.ptr24 = getelementptr inbounds float, ptr %p.131, i64 1
+ %9 = load float, ptr %incdec.ptr24, align 4
+ %10 = load i32, ptr %dp.132, align 4
%conv25 = fptoui float %9 to i32
%or26 = or i32 %10, %conv25
- store i32 %or26, i32* %dp.132, align 4
+ store i32 %or26, ptr %dp.132, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
- %incdec.ptr28 = getelementptr inbounds i32, i32* %dp.132, i64 1
+ %incdec.ptr28 = getelementptr inbounds i32, ptr %dp.132, i64 1
%exitcond = icmp eq i64 %indvars.iv.next, %8
br i1 %exitcond, label %for.end29, label %for.body23
; RUN: opt < %s -S -loop-reduce -mtriple=x86_64-- -mcpu=core2 | FileCheck %s
declare i1 @check() nounwind
-declare i1 @foo(i8*, i8*, i8*) nounwind
+declare i1 @foo(ptr, ptr, ptr) nounwind
; Check that redundant phi elimination ran
-define i32 @test(i8* %base) nounwind uwtable ssp {
+define i32 @test(ptr %base) nounwind uwtable ssp {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[WHILE_BODY_LR_PH_I:%.*]]
; CHECK: for.body.i:
; CHECK-NEXT: [[INDVARS_IV_I:%.*]] = phi i64 [ 0, [[WHILE_BODY_I]] ], [ [[INDVARS_IV_NEXT_I:%.*]], [[FOR_BODY_I]] ]
; CHECK-NEXT: [[ADD_PTR_SUM:%.*]] = add i64 [[ADD_PTR_SUM_I]], [[INDVARS_IV_I]]
-; CHECK-NEXT: [[ARRAYIDX22_I:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], i64 [[ADD_PTR_SUM]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[ARRAYIDX22_I]], align 1
+; CHECK-NEXT: [[ARRAYIDX22_I:%.*]] = getelementptr inbounds i8, ptr [[BASE:%.*]], i64 [[ADD_PTR_SUM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX22_I]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT_I]] = add i64 [[INDVARS_IV_I]], 1
; CHECK-NEXT: [[CMP:%.*]] = call i1 @check() #[[ATTR3:[0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END_I:%.*]], label [[FOR_BODY_I]]
; CHECK: for.end.i:
-; CHECK-NEXT: [[ADD_PTR_I144:%.*]] = getelementptr inbounds i8, i8* [[BASE]], i64 [[ADD_PTR_SUM_I]]
-; CHECK-NEXT: [[CMP2:%.*]] = tail call i1 @foo(i8* [[ADD_PTR_I144]], i8* [[ADD_PTR_I144]], i8* undef) #[[ATTR3]]
+; CHECK-NEXT: [[ADD_PTR_I144:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD_PTR_SUM_I]]
+; CHECK-NEXT: [[CMP2:%.*]] = tail call i1 @foo(ptr [[ADD_PTR_I144]], ptr [[ADD_PTR_I144]], ptr undef) #[[ATTR3]]
; CHECK-NEXT: br i1 [[CMP2]], label [[COND_TRUE29_I]], label [[COND_FALSE35_I:%.*]]
; CHECK: cond.true29.i:
; CHECK-NEXT: [[INDVARS_IV_NEXT8_I]] = add i64 [[INDVARS_IV7_I]], 16
for.body.i: ; preds = %for.body.i, %while.body.i
%indvars.iv.i = phi i64 [ 0, %while.body.i ], [ %indvars.iv.next.i, %for.body.i ]
%add.ptr.sum = add i64 %add.ptr.sum.i, %indvars.iv.i
- %arrayidx22.i = getelementptr inbounds i8, i8* %base, i64 %add.ptr.sum
- %0 = load i8, i8* %arrayidx22.i, align 1
+ %arrayidx22.i = getelementptr inbounds i8, ptr %base, i64 %add.ptr.sum
+ %0 = load i8, ptr %arrayidx22.i, align 1
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%cmp = call i1 @check() nounwind
br i1 %cmp, label %for.end.i, label %for.body.i
for.end.i: ; preds = %for.body.i
- %add.ptr.i144 = getelementptr inbounds i8, i8* %base, i64 %add.ptr.sum.i
- %cmp2 = tail call i1 @foo(i8* %add.ptr.i144, i8* %add.ptr.i144, i8* undef) nounwind
+ %add.ptr.i144 = getelementptr inbounds i8, ptr %base, i64 %add.ptr.sum.i
+ %cmp2 = tail call i1 @foo(ptr %add.ptr.i144, ptr %add.ptr.i144, ptr undef) nounwind
br i1 %cmp2, label %cond.true29.i, label %cond.false35.i
cond.true29.i: ; preds = %for.end.i
; CHECK-NEXT: [[CMP469:%.*]] = icmp slt i32 [[TMP0]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP469]], label [[FOR_BODY471:%.*]], label [[FOR_INC498_PREHEADER:%.*]]
; CHECK: for.body471:
-; CHECK-NEXT: [[FIRST:%.*]] = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 [[INDVARS_IV1163]], i32 1
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[FIRST]], align 4
+; CHECK-NEXT: [[FIRST:%.*]] = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], ptr @tags, i64 0, i64 [[INDVARS_IV1163]], i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[FIRST]], align 4
; CHECK-NEXT: br i1 false, label [[IF_THEN477]], label [[FOR_INC498_PREHEADER]]
; CHECK: for.inc498.preheader:
; CHECK-NEXT: br label [[FOR_INC498:%.*]]
for.cond468: ; preds = %if.then477, %entry
%indvars.iv1163 = phi i64 [ %indvars.iv.next1164, %if.then477 ], [ 1, %entry ]
- %k.0.in = phi i32* [ %last, %if.then477 ], [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 0, i32 2), %entry ]
- %k.0 = load i32, i32* %k.0.in, align 4
+ %k.0.in = phi ptr [ %last, %if.then477 ], [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], ptr @tags, i64 0, i64 0, i32 2), %entry ]
+ %k.0 = load i32, ptr %k.0.in, align 4
%0 = trunc i64 %indvars.iv1163 to i32
%cmp469 = icmp slt i32 %0, %n
br i1 %cmp469, label %for.body471, label %for.inc498
for.body471: ; preds = %for.cond468
- %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1
- %1 = load i32, i32* %first, align 4
+ %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], ptr @tags, i64 0, i64 %indvars.iv1163, i32 1
+ %1 = load i32, ptr %first, align 4
br i1 undef, label %if.then477, label %for.inc498
if.then477: ; preds = %for.body471
- %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 2
+ %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], ptr @tags, i64 0, i64 %indvars.iv1163, i32 2
%indvars.iv.next1164 = add i64 %indvars.iv1163, 1
br label %for.cond468
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[INDVARS_IV_I_SV_PHI]], [[INDVARS_IV8_I_SV_PHI26:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT: [[MUL_I_US_I:%.*]] = mul nsw i32 0, [[TMP1]]
-; CHECK-NEXT: [[ARRAYIDX5_US_I:%.*]] = getelementptr inbounds double, double* [[U:%.*]], i64 [[INDVARS_IV_I_SV_PHI]]
-; CHECK-NEXT: [[TMP2:%.*]] = load double, double* [[ARRAYIDX5_US_I]], align 8
+; CHECK-NEXT: [[ARRAYIDX5_US_I:%.*]] = getelementptr inbounds double, ptr [[U:%.*]], i64 [[INDVARS_IV_I_SV_PHI]]
+; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[ARRAYIDX5_US_I]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_I]] = add i64 [[INDVARS_IV_I_SV_PHI]], 1
; CHECK-NEXT: br i1 undef, label [[FOR_INC8_US_I:%.*]], label [[MESHBB]]
; CHECK: for.body3.lr.ph.us.i.loopexit:
; CHECK-NEXT: br label [[FOR_BODY3_LR_PH_US_I]]
; CHECK: for.body3.lr.ph.us.i:
; CHECK-NEXT: [[INDVARS_IV8_I_SV_PHI26]] = phi i64 [ undef, [[MESHBB1]] ], [ [[INDVARS_IV8_I_SV_PHI24:%.*]], [[FOR_BODY3_LR_PH_US_I_LOOPEXIT:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX_US_I:%.*]] = getelementptr inbounds double, double* undef, i64 [[INDVARS_IV8_I_SV_PHI26]]
+; CHECK-NEXT: [[ARRAYIDX_US_I:%.*]] = getelementptr inbounds double, ptr undef, i64 [[INDVARS_IV8_I_SV_PHI26]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDVARS_IV8_I_SV_PHI26]], 1
; CHECK-NEXT: br label [[FOR_BODY3_US_I:%.*]]
; CHECK: for.inc8.us.i2:
%0 = add nsw i64 %indvars.iv.i.SV.phi, %indvars.iv8.i.SV.phi26
%1 = trunc i64 %0 to i32
%mul.i.us.i = mul nsw i32 0, %1
- %arrayidx5.us.i = getelementptr inbounds double, double* %u, i64 %indvars.iv.i.SV.phi
- %2 = load double, double* %arrayidx5.us.i, align 8
+ %arrayidx5.us.i = getelementptr inbounds double, ptr %u, i64 %indvars.iv.i.SV.phi
+ %2 = load double, ptr %arrayidx5.us.i, align 8
%indvars.iv.next.i = add i64 %indvars.iv.i.SV.phi, 1
br i1 undef, label %for.inc8.us.i, label %meshBB
for.body3.lr.ph.us.i: ; preds = %meshBB1, %meshBB
%indvars.iv8.i.SV.phi26 = phi i64 [ undef, %meshBB1 ], [ %indvars.iv8.i.SV.phi24, %meshBB ]
- %arrayidx.us.i = getelementptr inbounds double, double* undef, i64 %indvars.iv8.i.SV.phi26
+ %arrayidx.us.i = getelementptr inbounds double, ptr undef, i64 %indvars.iv8.i.SV.phi26
%3 = add i64 %indvars.iv8.i.SV.phi26, 1
br label %for.body3.us.i
%0 = phi i32 [ %13, %not_zero48.us ], [ undef, %cHeapLvb.exit ]
%indvars.iv.next.us = add nuw nsw i64 %indvars.iv.us, 1
%1 = add i32 %0, 2
- %2 = getelementptr inbounds i32, i32 addrspace(1)* undef, i64 %indvars.iv.next.us
- %3 = load i32, i32 addrspace(1)* %2, align 4
+ %2 = getelementptr inbounds i32, ptr addrspace(1) undef, i64 %indvars.iv.next.us
+ %3 = load i32, ptr addrspace(1) %2, align 4
%4 = add i32 %0, 3
- %5 = load i32, i32 addrspace(1)* undef, align 4
+ %5 = load i32, ptr addrspace(1) undef, align 4
%6 = sub i32 undef, %5
%factor.us.2 = shl i32 %6, 1
%7 = add i32 %factor.us.2, %1
- %8 = load i32, i32 addrspace(1)* undef, align 4
+ %8 = load i32, ptr addrspace(1) undef, align 4
%9 = sub i32 %7, %8
%factor.us.3 = shl i32 %9, 1
%10 = add i32 %factor.us.3, %4
- %11 = load i32, i32 addrspace(1)* undef, align 4
+ %11 = load i32, ptr addrspace(1) undef, align 4
%12 = sub i32 %10, %11
%factor.us.4 = shl i32 %12, 1
%13 = add i32 %0, 8
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-define void @foo(i32 %size, i32 %nsteps, i8* nocapture %maxarray, i8* nocapture readnone %buffer, i32 %init) local_unnamed_addr #0 {
+define void @foo(i32 %size, i32 %nsteps, ptr nocapture %maxarray, ptr nocapture readnone %buffer, i32 %init) local_unnamed_addr #0 {
entry:
%cmp25 = icmp sgt i32 %nsteps, 0
br i1 %cmp25, label %for.cond1.preheader.lr.ph, label %for.end12
; CHECK-LABEL: for.body3:
; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ 1, %for.body3.lr.ph ], [ {{.*}}, %for.body3 ]
; CHECK-NOT: = phi i64
-; CHECK-NEXT: [[LOADADDR:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
-; CHECK-NEXT: = load i8, i8* [[LOADADDR]], align 1
+; CHECK-NEXT: [[LOADADDR:%[^,]+]] = getelementptr i8, ptr {{.*}}, i64 [[LSR]]
+; CHECK-NEXT: = load i8, ptr [[LOADADDR]], align 1
; CHECK: br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
%t5 = trunc i64 %indvars.iv to i8
%t3 = add nsw i64 %t1, %indvars.iv
- %arrayidx = getelementptr inbounds i8, i8* %maxarray, i64 %t3
- %t4 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %maxarray, i64 %t3
+ %t4 = load i8, ptr %arrayidx, align 1
%add5 = add i8 %t4, %t5
%add6 = add i8 %add5, %t2
- %arrayidx9 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
- store i8 %add6, i8* %arrayidx9, align 1
+ %arrayidx9 = getelementptr inbounds i8, ptr %maxarray, i64 %indvars.iv
+ store i8 %add6, ptr %arrayidx9, align 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
declare void @maybe_throws()
declare void @use1(i1)
; Regression test: a loop-carried pointer phi (%ptr) is live into a landing
; pad. The landing pad compares %ptr against the constant address
; inttoptr (i64 42 to ptr); the pass under test must keep the phi intact so
; this compare still sees the per-iteration pointer after the invoke unwinds.
; NOTE(review): this span is a diff excerpt of the typed-pointer -> opaque-
; pointer test migration; '-' lines are the old i8* form, '+' lines the new
; 'ptr' form. The IR tokens themselves are unchanged below.
-define void @is_not_42(i8* %baseptr, i8* %finalptr) local_unnamed_addr align 2 personality i8* undef {
+define void @is_not_42(ptr %baseptr, ptr %finalptr) local_unnamed_addr align 2 personality ptr undef {
; CHECK-LABEL: @is_not_42(
; CHECK-NEXT: preheader:
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
-; CHECK-NEXT: [[PTR:%.*]] = phi i8* [ [[INCPTR:%.*]], [[LATCH:%.*]] ], [ [[BASEPTR:%.*]], [[PREHEADER:%.*]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[INCPTR:%.*]], [[LATCH:%.*]] ], [ [[BASEPTR:%.*]], [[PREHEADER:%.*]] ]
; CHECK-NEXT: invoke void @maybe_throws()
; CHECK-NEXT: to label [[LATCH]] unwind label [[LPAD:%.*]]
; CHECK: lpad:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: catch i8* inttoptr (i64 42 to i8*)
-; CHECK-NEXT: [[PTR_IS_NOT_42:%.*]] = icmp ne i8* [[PTR]], inttoptr (i64 42 to i8*)
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr inttoptr (i64 42 to ptr)
+; CHECK-NEXT: [[PTR_IS_NOT_42:%.*]] = icmp ne ptr [[PTR]], inttoptr (i64 42 to ptr)
; CHECK-NEXT: call void @use1(i1 [[PTR_IS_NOT_42]])
; CHECK-NEXT: ret void
; CHECK: latch:
-; CHECK-NEXT: [[INCPTR]] = getelementptr inbounds i8, i8* [[PTR]], i64 1
+; CHECK-NEXT: [[INCPTR]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: br label [[HEADER]]
;
preheader:
 br label %header
; Loop header: %ptr starts at %baseptr and is bumped by 1 in the latch.
header:
- %ptr = phi i8* [ %incptr, %latch ], [ %baseptr, %preheader ]
+ %ptr = phi ptr [ %incptr, %latch ], [ %baseptr, %preheader ]
 invoke void @maybe_throws() to label %latch unwind label %lpad
; Unwind path: the compare consumes the loop phi, keeping it live here.
lpad:
- landingpad { i8*, i32 } catch i8* inttoptr (i64 42 to i8*)
- %ptr_is_not_42 = icmp ne i8* %ptr, inttoptr (i64 42 to i8*)
+ landingpad { ptr, i32 } catch ptr inttoptr (i64 42 to ptr)
+ %ptr_is_not_42 = icmp ne ptr %ptr, inttoptr (i64 42 to ptr)
 call void @use1(i1 %ptr_is_not_42)
 ret void
latch:
- %incptr = getelementptr inbounds i8, i8* %ptr, i64 1
+ %incptr = getelementptr inbounds i8, ptr %ptr, i64 1
 br label %header
}
declare void @maybe_throws()
declare void @use1(i1)
; Companion test to @is_not_42: identical loop/invoke shape, but the landing
; pad catches and compares against null instead of a fixed non-null address.
; The loop phi %ptr must survive so the null-compare in the landing pad is
; still performed on the per-iteration pointer.
; NOTE(review): diff excerpt of the i8* -> opaque 'ptr' test migration;
; '-' lines old form, '+' lines new form; IR semantics unchanged.
-define void @is_not_null(i8* %baseptr) local_unnamed_addr align 2 personality i8* undef {
+define void @is_not_null(ptr %baseptr) local_unnamed_addr align 2 personality ptr undef {
; CHECK-LABEL: @is_not_null(
; CHECK-NEXT: preheader:
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
-; CHECK-NEXT: [[PTR:%.*]] = phi i8* [ [[INCPTR:%.*]], [[LATCH:%.*]] ], [ [[BASEPTR:%.*]], [[PREHEADER:%.*]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[INCPTR:%.*]], [[LATCH:%.*]] ], [ [[BASEPTR:%.*]], [[PREHEADER:%.*]] ]
; CHECK-NEXT: invoke void @maybe_throws()
; CHECK-NEXT: to label [[LATCH]] unwind label [[LPAD:%.*]]
; CHECK: lpad:
-; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: catch i8* null
-; CHECK-NEXT: [[PTR_IS_NOT_NULL:%.*]] = icmp ne i8* [[PTR]], null
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr null
+; CHECK-NEXT: [[PTR_IS_NOT_NULL:%.*]] = icmp ne ptr [[PTR]], null
; CHECK-NEXT: call void @use1(i1 [[PTR_IS_NOT_NULL]])
; CHECK-NEXT: ret void
; CHECK: latch:
-; CHECK-NEXT: [[INCPTR]] = getelementptr inbounds i8, i8* [[PTR]], i64 1
+; CHECK-NEXT: [[INCPTR]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: br label [[HEADER]]
;
preheader:
 br label %header
; Loop header: %ptr = %baseptr on entry, then advances by 1 each iteration.
header:
- %ptr = phi i8* [ %incptr, %latch ], [ %baseptr, %preheader ]
+ %ptr = phi ptr [ %incptr, %latch ], [ %baseptr, %preheader ]
 invoke void @maybe_throws() to label %latch unwind label %lpad
; Unwind path: null-compare of the loop phi keeps it live into the pad.
lpad:
- landingpad { i8*, i32 } catch i8* null
- %ptr_is_not_null = icmp ne i8* %ptr, null
+ landingpad { ptr, i32 } catch ptr null
+ %ptr_is_not_null = icmp ne ptr %ptr, null
 call void @use1(i1 %ptr_is_not_null)
 ret void
latch:
- %incptr = getelementptr inbounds i8, i8* %ptr, i64 1
+ %incptr = getelementptr inbounds i8, ptr %ptr, i64 1
 br label %header
}
; Make sure re-used instructions do not impact the insertion points for SCEV
; expansion.
-define void @test(double* %ioptr, i32 %X, double* %start, double* %end) {
+define void @test(ptr %ioptr, i32 %X, ptr %start, ptr %end) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[IDX_EXT32:%.*]] = sext i32 [[X:%.*]] to i64
; CHECK: for.body15:
; CHECK-NEXT: br label [[FOR_BODY37:%.*]]
; CHECK: for.body37:
-; CHECK-NEXT: [[P0R_0335:%.*]] = phi double* [ [[ADD_PTR94:%.*]], [[FOR_BODY37]] ], [ [[START:%.*]], [[FOR_BODY15]] ]
-; CHECK-NEXT: [[ADD_PTR94]] = getelementptr inbounds double, double* [[P0R_0335]], i64 1
-; CHECK-NEXT: [[P0R_03351:%.*]] = bitcast double* [[P0R_0335]] to i8*
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, i8* [[P0R_03351]], i64 [[TMP1]]
-; CHECK-NEXT: [[UGLYGEP2:%.*]] = bitcast i8* [[UGLYGEP]] to double*
-; CHECK-NEXT: [[F1I_0:%.*]] = load double, double* [[UGLYGEP2]], align 8
+; CHECK-NEXT: [[P0R_0335:%.*]] = phi ptr [ [[ADD_PTR94:%.*]], [[FOR_BODY37]] ], [ [[START:%.*]], [[FOR_BODY15]] ]
+; CHECK-NEXT: [[ADD_PTR94]] = getelementptr inbounds double, ptr [[P0R_0335]], i64 1
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[P0R_0335]], i64 [[TMP1]]
+; CHECK-NEXT: [[F1I_0:%.*]] = load double, ptr [[UGLYGEP]], align 8
; CHECK-NEXT: call void @use(double [[F1I_0]])
-; CHECK-NEXT: [[EC0:%.*]] = icmp eq double* [[ADD_PTR94]], [[END:%.*]]
+; CHECK-NEXT: [[EC0:%.*]] = icmp eq ptr [[ADD_PTR94]], [[END:%.*]]
; CHECK-NEXT: br i1 [[EC0]], label [[FOR_BODY37]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: [[P0R_0_LCSSA:%.*]] = phi double* [ [[ADD_PTR94]], [[FOR_END_LOOPEXIT]] ]
-; CHECK-NEXT: [[EC1:%.*]] = icmp eq double* [[P0R_0_LCSSA]], [[END]]
+; CHECK-NEXT: [[P0R_0_LCSSA:%.*]] = phi ptr [ [[ADD_PTR94]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT: [[EC1:%.*]] = icmp eq ptr [[P0R_0_LCSSA]], [[END]]
; CHECK-NEXT: br i1 [[EC1]], label [[FOR_BODY15]], label [[FOR_INC133:%.*]]
; CHECK: for.inc133:
; CHECK-NEXT: ret void
br label %for.body37
for.body37: ; preds = %for.body37, %for.body37.preheader
- %p0r.0335 = phi double* [ %add.ptr94, %for.body37 ], [ %start, %for.body15 ]
- %add.ptr94 = getelementptr inbounds double, double* %p0r.0335, i64 1
- %f1i.0.in = getelementptr inbounds double, double* %add.ptr94, i64 %idx.ext32
- %f1i.0 = load double, double* %f1i.0.in, align 8
+ %p0r.0335 = phi ptr [ %add.ptr94, %for.body37 ], [ %start, %for.body15 ]
+ %add.ptr94 = getelementptr inbounds double, ptr %p0r.0335, i64 1
+ %f1i.0.in = getelementptr inbounds double, ptr %add.ptr94, i64 %idx.ext32
+ %f1i.0 = load double, ptr %f1i.0.in, align 8
call void @use(double %f1i.0)
- %ec0 = icmp eq double* %add.ptr94, %end
+ %ec0 = icmp eq ptr %add.ptr94, %end
br i1 %ec0, label %for.body37, label %for.end.loopexit
for.end.loopexit: ; preds = %for.body37
br label %for.end
for.end: ; preds = %for.end.loopexit, %for.body15
- %p0r.0.lcssa = phi double* [ %add.ptr94, %for.end.loopexit ]
- %ec1 = icmp eq double* %p0r.0.lcssa, %end
+ %p0r.0.lcssa = phi ptr [ %add.ptr94, %for.end.loopexit ]
+ %ec1 = icmp eq ptr %p0r.0.lcssa, %end
br i1 %ec1, label %for.body15, label %for.inc133
for.inc133: ; preds = %for.end
target triple = "x86_64-unknown-unknown"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; LSR offset-scaling regression test: the checked expansion must compute the
; address as %r2 + (%r3 * %lsr.iv) and feed that sum into the getelementptr,
; i.e. the offset must not be folded with an incorrect scale.
; NOTE(review): this excerpt is visibly truncated — the blocks %L, %ib and
; %idxend.8 and the definition of %lsr.iv/%r4..%r6 are not in view here;
; confirm against the full test file before relying on the CFG shown.
; Diff excerpt of the i64* -> opaque 'ptr' migration ('-' old, '+' new).
-define void @incorrect_offset_scaling(i64, i64*) {
+define void @incorrect_offset_scaling(i64, ptr) {
top:
  br label %L
if6:                                              ; preds = %idxend.8
  %r2 = add i64 %0, -1
-  %r3 = load i64, i64* %1, align 8
+  %r3 = load i64, ptr %1, align 8
; CHECK:  %r2 = add i64 %0, -1
; CHECK:  %r3 = load i64
  br label %ib
; CHECK:  %r4 = mul i64 %r3, %lsr.iv
; CHECK:  %r5 = add i64 %r2, %r4
; CHECK:  %r6 = icmp ult i64 %r5, undef
-; CHECK:  %r7 = getelementptr i64, i64* undef, i64 %r5
-  %r7 = getelementptr i64, i64* undef, i64 %r5
-  store i64 1, i64* %r7, align 8
+; CHECK:  %r7 = getelementptr i64, ptr undef, i64 %r5
+  %r7 = getelementptr i64, ptr undef, i64 %r5
+  store i64 1, ptr %r7, align 8
  br label %L
}
; X86: add
; X86: add
; X86: %for.body.3
-define void @sharedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c, i32 %s, i32 %len) nounwind ssp {
+define void @sharedidx(ptr nocapture %a, ptr nocapture %b, ptr nocapture %c, i32 %s, i32 %len) nounwind ssp {
entry:
%cmp8 = icmp eq i32 %len, 0
br i1 %cmp8, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body.3
%i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.09
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %a, i32 %i.09
+ %0 = load i8, ptr %arrayidx, align 1
%conv6 = zext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.09
- %1 = load i8, i8* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %b, i32 %i.09
+ %1 = load i8, ptr %arrayidx1, align 1
%conv27 = zext i8 %1 to i32
%add = add nsw i32 %conv27, %conv6
%conv3 = trunc i32 %add to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.09
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %c, i32 %i.09
+ store i8 %conv3, ptr %arrayidx4, align 1
%add5 = add i32 %i.09, %s
%cmp = icmp ult i32 %add5, %len
br i1 %cmp, label %for.body.1, label %for.end
ret void
for.body.1: ; preds = %for.body
- %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %add5
- %2 = load i8, i8* %arrayidx.1, align 1
+ %arrayidx.1 = getelementptr inbounds i8, ptr %a, i32 %add5
+ %2 = load i8, ptr %arrayidx.1, align 1
%conv6.1 = zext i8 %2 to i32
- %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %add5
- %3 = load i8, i8* %arrayidx1.1, align 1
+ %arrayidx1.1 = getelementptr inbounds i8, ptr %b, i32 %add5
+ %3 = load i8, ptr %arrayidx1.1, align 1
%conv27.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv27.1, %conv6.1
%conv3.1 = trunc i32 %add.1 to i8
- %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %add5
- store i8 %conv3.1, i8* %arrayidx4.1, align 1
+ %arrayidx4.1 = getelementptr inbounds i8, ptr %c, i32 %add5
+ store i8 %conv3.1, ptr %arrayidx4.1, align 1
%add5.1 = add i32 %add5, %s
%cmp.1 = icmp ult i32 %add5.1, %len
br i1 %cmp.1, label %for.body.2, label %for.end
for.body.2: ; preds = %for.body.1
- %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %add5.1
- %4 = load i8, i8* %arrayidx.2, align 1
+ %arrayidx.2 = getelementptr inbounds i8, ptr %a, i32 %add5.1
+ %4 = load i8, ptr %arrayidx.2, align 1
%conv6.2 = zext i8 %4 to i32
- %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %add5.1
- %5 = load i8, i8* %arrayidx1.2, align 1
+ %arrayidx1.2 = getelementptr inbounds i8, ptr %b, i32 %add5.1
+ %5 = load i8, ptr %arrayidx1.2, align 1
%conv27.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv27.2, %conv6.2
%conv3.2 = trunc i32 %add.2 to i8
- %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %add5.1
- store i8 %conv3.2, i8* %arrayidx4.2, align 1
+ %arrayidx4.2 = getelementptr inbounds i8, ptr %c, i32 %add5.1
+ store i8 %conv3.2, ptr %arrayidx4.2, align 1
%add5.2 = add i32 %add5.1, %s
%cmp.2 = icmp ult i32 %add5.2, %len
br i1 %cmp.2, label %for.body.3, label %for.end
for.body.3: ; preds = %for.body.2
- %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %add5.2
- %6 = load i8, i8* %arrayidx.3, align 1
+ %arrayidx.3 = getelementptr inbounds i8, ptr %a, i32 %add5.2
+ %6 = load i8, ptr %arrayidx.3, align 1
%conv6.3 = zext i8 %6 to i32
- %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %add5.2
- %7 = load i8, i8* %arrayidx1.3, align 1
+ %arrayidx1.3 = getelementptr inbounds i8, ptr %b, i32 %add5.2
+ %7 = load i8, ptr %arrayidx1.3, align 1
%conv27.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv27.3, %conv6.3
%conv3.3 = trunc i32 %add.3 to i8
- %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %add5.2
- store i8 %conv3.3, i8* %arrayidx4.3, align 1
+ %arrayidx4.3 = getelementptr inbounds i8, ptr %c, i32 %add5.2
+ store i8 %conv3.3, ptr %arrayidx4.3, align 1
%add5.3 = add i32 %add5.2, %s
%cmp.3 = icmp ult i32 %add5.3, %len
br i1 %cmp.3, label %for.body, label %for.end
@c = dso_local local_unnamed_addr global i8 0, align 1, !dbg !9
define dso_local signext i16 @d() local_unnamed_addr !dbg !17 {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%tobool2.not = icmp eq i32 %0, 0
- %1 = load i8, i8* @b, align 1, !dbg !24
+ %1 = load i8, ptr @b, align 1, !dbg !24
%cmp.not13 = icmp eq i8 %1, 7, !dbg !24
br i1 %cmp.not13, label %cleanup, label %if.end.preheader, !dbg !24
i: ; preds = %if.end
%add = add i8 %2, 1, !dbg !24
- store i8 %add, i8* @b, align 1, !dbg !24
+ store i8 %add, ptr @b, align 1, !dbg !24
%cmp.not = icmp eq i8 %add, 7, !dbg !24
call void @llvm.dbg.value(metadata i1 %cmp.not, metadata !23, metadata !DIExpression(DW_OP_LLVM_convert, 1, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg !24
br i1 %cmp.not, label %cleanup.loopexit, label %if.end, !dbg !24
cleanup: ; preds = %cleanup.loopexit, %if.end4, %entry
%cmp.not12 = phi i1 [ %cmp.not13, %if.end4 ], [ %cmp.not13, %entry ], [ %cmp.not, %cleanup.loopexit ]
%retval.0 = phi i16 [ %conv5, %if.end4 ], [ undef, %entry ], [ undef, %cleanup.loopexit ]
- %3 = load i8, i8* @c, align 1
+ %3 = load i8, ptr @c, align 1
%conv8 = sext i8 %3 to i16
%retval.1 = select i1 %cmp.not12, i16 %conv8, i16 %retval.0
ret i16 %retval.1, !dbg !24
; LSR formula-filtering test (see the in-body comment): without filtering
; non-optimal formulae that share the same ScaledReg and Scale, the solver
; picks a worse solution; the test pins that bb9 keeps exactly two pointer
; phis (%tmp10/%tmp11) rather than expanded IV machinery.
; NOTE(review): in the opaque-pointer ('+') form, %tmp5 — previously
; 'getelementptr inbounds i8, i8* %tmp4, i64 0', a no-op offset — is dropped
; and %tmp30 compares directly against %tmp4, which is the same address.
; The bitcast %tmp14 is likewise dropped since opaque ptrs need no cast.
define void @foo() local_unnamed_addr {
bb:
-  %tmp = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 3), align 8
+  %tmp = load i64, ptr getelementptr inbounds (%struct.ham, ptr @global, i64 0, i32 3), align 8
  %tmp1 = and i64 %tmp, 1792
-  %tmp2 = load i64, i64* getelementptr inbounds (%struct.ham, %struct.ham* @global, i64 0, i32 4), align 8
+  %tmp2 = load i64, ptr getelementptr inbounds (%struct.ham, ptr @global, i64 0, i32 4), align 8
  %tmp3 = add i64 %tmp1, %tmp2
-  %tmp4 = load i8*, i8** null, align 8
-  %tmp5 = getelementptr inbounds i8, i8* %tmp4, i64 0
+  %tmp4 = load ptr, ptr null, align 8
  %tmp6 = sub i64 0, %tmp3
-  %tmp7 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp6
-  %tmp8 = inttoptr i64 0 to i8*
+  %tmp7 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp6
+  %tmp8 = inttoptr i64 0 to ptr
  br label %bb9
; Without filtering non-optimal formulae with the same ScaledReg and Scale, the strategy
; unoptimal result.
; CHECK-LABEL: @foo(
; CHECK: bb9:
-; CHECK-NEXT: = phi i8*
-; CHECK-NEXT: = phi i8*
+; CHECK-NEXT: = phi ptr
+; CHECK-NEXT: = phi ptr
; First loop: two pointers advance in lockstep by 16 bytes per iteration.
bb9:                                              ; preds = %bb12, %bb
-  %tmp10 = phi i8* [ %tmp7, %bb ], [ %tmp16, %bb12 ]
-  %tmp11 = phi i8* [ %tmp8, %bb ], [ %tmp17, %bb12 ]
+  %tmp10 = phi ptr [ %tmp7, %bb ], [ %tmp16, %bb12 ]
+  %tmp11 = phi ptr [ %tmp8, %bb ], [ %tmp17, %bb12 ]
  br i1 false, label %bb18, label %bb12
bb12:                                             ; preds = %bb9
-  %tmp13 = getelementptr inbounds i8, i8* %tmp10, i64 8
-  %tmp14 = bitcast i8* %tmp13 to i64*
-  %tmp15 = load i64, i64* %tmp14, align 1
-  %tmp16 = getelementptr inbounds i8, i8* %tmp10, i64 16
-  %tmp17 = getelementptr inbounds i8, i8* %tmp11, i64 16
+  %tmp13 = getelementptr inbounds i8, ptr %tmp10, i64 8
+  %tmp15 = load i64, ptr %tmp13, align 1
+  %tmp16 = getelementptr inbounds i8, ptr %tmp10, i64 16
+  %tmp17 = getelementptr inbounds i8, ptr %tmp11, i64 16
  br label %bb9
; Select a start pair for the copy loop depending on %tmp11 vs null.
bb18:                                             ; preds = %bb9
-  %tmp19 = icmp ugt i8* %tmp11, null
-  %tmp20 = getelementptr inbounds i8, i8* %tmp10, i64 8
-  %tmp21 = getelementptr inbounds i8, i8* %tmp11, i64 8
-  %tmp22 = select i1 %tmp19, i8* %tmp10, i8* %tmp20
-  %tmp23 = select i1 %tmp19, i8* %tmp11, i8* %tmp21
+  %tmp19 = icmp ugt ptr %tmp11, null
+  %tmp20 = getelementptr inbounds i8, ptr %tmp10, i64 8
+  %tmp21 = getelementptr inbounds i8, ptr %tmp11, i64 8
+  %tmp22 = select i1 %tmp19, ptr %tmp10, ptr %tmp20
+  %tmp23 = select i1 %tmp19, ptr %tmp11, ptr %tmp21
  br label %bb24
; Second loop: byte-by-byte copy from %tmp25 to %tmp26 (no exit edge;
; %tmp30 is computed but unused by a branch — intentional for the test).
bb24:                                             ; preds = %bb24, %bb18
-  %tmp25 = phi i8* [ %tmp27, %bb24 ], [ %tmp22, %bb18 ]
-  %tmp26 = phi i8* [ %tmp29, %bb24 ], [ %tmp23, %bb18 ]
-  %tmp27 = getelementptr inbounds i8, i8* %tmp25, i64 1
-  %tmp28 = load i8, i8* %tmp25, align 1
-  %tmp29 = getelementptr inbounds i8, i8* %tmp26, i64 1
-  store i8 %tmp28, i8* %tmp26, align 1
-  %tmp30 = icmp eq i8* %tmp29, %tmp5
+  %tmp25 = phi ptr [ %tmp27, %bb24 ], [ %tmp22, %bb18 ]
+  %tmp26 = phi ptr [ %tmp29, %bb24 ], [ %tmp23, %bb18 ]
+  %tmp27 = getelementptr inbounds i8, ptr %tmp25, i64 1
+  %tmp28 = load i8, ptr %tmp25, align 1
+  %tmp29 = getelementptr inbounds i8, ptr %tmp26, i64 1
+  store i8 %tmp28, ptr %tmp26, align 1
+  %tmp30 = icmp eq ptr %tmp29, %tmp4
  br label %bb24
}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @foo(i32 %size, i32 %nsteps, i32 %hsize, i32* %lined, i8* %maxarray) {
+define void @foo(i32 %size, i32 %nsteps, i32 %hsize, ptr %lined, ptr %maxarray) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP215:%.*]] = icmp sgt i32 [[SIZE:%.*]], 1
; CHECK: for.body2.preheader:
; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
; CHECK: for.body2:
-; CHECK-NEXT: [[LSR_IV3:%.*]] = phi i8* [ [[SCEVGEP:%.*]], [[FOR_BODY2]] ], [ [[MAXARRAY:%.*]], [[FOR_BODY2_PREHEADER]] ]
+; CHECK-NEXT: [[LSR_IV3:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY2]] ], [ [[MAXARRAY:%.*]], [[FOR_BODY2_PREHEADER]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY2]] ], [ [[TMP0]], [[FOR_BODY2_PREHEADER]] ]
-; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, i8* [[LSR_IV3]], i64 1
-; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[SCEVGEP6]], align 1
-; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, i8* [[LSR_IV3]], i64 [[TMP0]]
-; CHECK-NEXT: [[V2:%.*]] = load i8, i8* [[SCEVGEP5]], align 1
+; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[LSR_IV3]], i64 1
+; CHECK-NEXT: [[V1:%.*]] = load i8, ptr [[SCEVGEP6]], align 1
+; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[LSR_IV3]], i64 [[TMP0]]
+; CHECK-NEXT: [[V2:%.*]] = load i8, ptr [[SCEVGEP5]], align 1
; CHECK-NEXT: [[TMPV:%.*]] = xor i8 [[V1]], [[V2]]
-; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, i8* [[LSR_IV3]], i64 [[LSR_IV1]]
-; CHECK-NEXT: store i8 [[TMPV]], i8* [[SCEVGEP4]], align 1
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[LSR_IV3]], i64 [[LSR_IV1]]
+; CHECK-NEXT: store i8 [[TMPV]], ptr [[SCEVGEP4]], align 1
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], -1
-; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, i8* [[LSR_IV3]], i64 1
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV3]], i64 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY2]], label [[FOR_INC_LOOPEXIT:%.*]]
; CHECK: for.inc.loopexit:
for.body2: ; preds = %for.body2.preheader, %for.body2
%indvars.iv = phi i64 [ 1, %for.body2.preheader ], [ %indvars.iv.next, %for.body2 ]
- %arrayidx1 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
- %v1 = load i8, i8* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %maxarray, i64 %indvars.iv
+ %v1 = load i8, ptr %arrayidx1, align 1
%idx2 = add nsw i64 %indvars.iv, %sub2
- %arrayidx2 = getelementptr inbounds i8, i8* %maxarray, i64 %idx2
- %v2 = load i8, i8* %arrayidx2, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %maxarray, i64 %idx2
+ %v2 = load i8, ptr %arrayidx2, align 1
%tmpv = xor i8 %v1, %v2
%t4 = add nsw i64 %t2, %indvars.iv
- %add.ptr = getelementptr inbounds i8, i8* %maxarray, i64 %t4
- store i8 %tmpv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %maxarray, i64 %t4
+ store i8 %tmpv, ptr %add.ptr, align 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%wide.trip.count = zext i32 %size to i64
%exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @indvar_expansion(i8* nocapture readonly %rowsptr) {
+define void @indvar_expansion(ptr nocapture readonly %rowsptr) {
entry:
br label %for.cond
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body14.lr.ph ]
- %4 = getelementptr inbounds i8, i8* %rowsptr, i64 %index
- %5 = bitcast i8* %4 to <4 x i8>*
- %wide.load = load <4 x i8>, <4 x i8>* %5, align 1
+ %4 = getelementptr inbounds i8, ptr %rowsptr, i64 %index
+ %wide.load = load <4 x i8>, ptr %4, align 1
%index.next = add i64 %index, 8
- %6 = icmp eq i64 %index.next, %end.idx.rnd.down
- br i1 %6, label %for.end24, label %vector.body
+ %5 = icmp eq i64 %index.next, %end.idx.rnd.down
+ br i1 %5, label %for.end24, label %vector.body
for.end24:
ret void
; Function Attrs: nounwind optsize ssp uwtable
; Test driver: loads globals @j/@d, derives the sign bit of @d
; (%.lobit = @d >> 31, %.lobit.not = its complement), conditionally zeroes
; @c, then stores results to @g/@i/@h/@e and prints the sum via printf.
; NOTE(review): this excerpt is truncated — %conv and %inc are used in
; for.end but defined outside the visible span, and fn3.exit falls through
; into for.end without a terminator here; consult the full test file.
; Diff excerpt of the typed-pointer -> opaque 'ptr' migration; note the '+'
; printf call folds the '[4 x i8]' gep-to-element-0 into a bare @.str.
define i32 @main() #0 {
entry:
-  store i8 0, i8* @h, align 1
-  %0 = load i32, i32* @j, align 4
+  store i8 0, ptr @h, align 1
+  %0 = load i32, ptr @j, align 4
  %tobool.i = icmp eq i32 %0, 0
-  %1 = load i32, i32* @d, align 4
+  %1 = load i32, ptr @d, align 4
  %cmp3 = icmp sgt i32 %1, -1
  %.lobit = lshr i32 %1, 31
  %.lobit.not = xor i32 %.lobit, 1
  br i1 %tobool.i, label %fn3.exit, label %land.rhs.i
land.rhs.i:                                       ; preds = %for.body
-  store i32 0, i32* @c, align 4
+  store i32 0, ptr @c, align 4
  br label %fn3.exit
fn3.exit:                                         ; preds = %for.body, %land.rhs.i
for.end:                                          ; preds = %fn3.exit
  %.lobit.not. = select i1 %cmp3, i32 %.lobit.not, i32 0
-  store i32 %conv, i32* @g, align 4
-  store i32 %.lobit.not., i32* @i, align 4
-  store i8 %inc, i8* @h, align 1
+  store i32 %conv, ptr @g, align 4
+  store i32 %.lobit.not., ptr @i, align 4
+  store i8 %inc, ptr @h, align 1
  %conv7 = sext i8 %inc to i32
  %add = add nsw i32 %conv7, %conv
-  store i32 %add, i32* @e, align 4
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %add) #2
+  store i32 %add, ptr @e, align 4
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %add) #2
  ret i32 0
}
; Function Attrs: nounwind optsize
-declare i32 @printf(i8* nocapture readonly, ...) #1
+declare i32 @printf(ptr nocapture readonly, ...) #1
attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
; CHECK-LABEL: @main(
define i32 @main() {
entry:
- %a0 = load i32, i32* @a, align 4
+ %a0 = load i32, ptr @a, align 4
%cmpa = icmp slt i32 %a0, 4
br i1 %cmpa, label %preheader, label %for.end
preheader:
- %b0 = load i8, i8* @b, align 1
+ %b0 = load i8, ptr @b, align 1
%b0sext = sext i8 %b0 to i64
br label %for.body
br i1 %cmp, label %lor.false, label %if.then
lor.false:
- %cgep = getelementptr inbounds [4 x i8], [4 x i8]* @c, i64 0, i64 %iv
- %ci = load i8, i8* %cgep, align 1
+ %cgep = getelementptr inbounds [4 x i8], ptr @c, i64 0, i64 %iv
+ %ci = load i8, ptr %cgep, align 1
%cisext = sext i8 %ci to i32
%ivtrunc = trunc i64 %iv to i32
%cmp2 = icmp eq i32 %cisext, %ivtrunc
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB10:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: store i64 [[LSR_IV_NEXT2:%.*]], i64 addrspace(1)* undef, align 8
+; CHECK-NEXT: store i64 [[LSR_IV_NEXT2:%.*]], ptr addrspace(1) undef, align 8
; CHECK-NEXT: ret i32 [[LSR_IV_NEXT:%.*]]
; CHECK: bb10:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi i64 [ [[LSR_IV_NEXT2]], [[BB10]] ], [ 9, [[BB:%.*]] ]
%tmp6 = add i64 %tmp5, undef
%tmp7 = add i64 %tmp6, undef
%tmp8 = add i64 undef, %tmp7
- store i64 %tmp8, i64 addrspace(1)* undef, align 8
+ store i64 %tmp8, ptr addrspace(1) undef, align 8
%tmp9 = trunc i64 %tmp7 to i32
ret i32 %tmp9
target triple = "x86_64-unknown-linux-gnu"
declare void @use(i8 zeroext)
-declare void @use_p(i8*)
+declare void @use_p(ptr)
; nuw needs to be dropped when switching to post-inc comparison.
define i8 @drop_nuw() {
; Make sure we do not crash when applying info from loop guards to expressions in @bar.
; Test case for PR47776.
-define void @bar() personality i32* ()* @zot {
+define void @bar() personality ptr @zot {
; CHECK-LABEL: @bar(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = invoke i32 @fn()
; CHECK-NEXT: to label [[BB5]] unwind label [[BB23_LOOPEXIT_SPLIT_LP:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, i32 addrspace(1)* undef unordered, align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr addrspace(1) undef unordered, align 8
; CHECK-NEXT: [[TMP7]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT: [[C_0:%.*]] = icmp ult i64 [[TMP7]], 10000
; CHECK-NEXT: br i1 [[C_0]], label [[BB2]], label [[BB8:%.*]]
to label %bb5 unwind label %bb23
bb5: ; preds = %bb2
- %tmp6 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
+ %tmp6 = load atomic i32, ptr addrspace(1) undef unordered, align 8
%tmp7 = add nuw nsw i64 %tmp3, 1
%c.0 = icmp ult i64 %tmp7, 10000
br i1 %c.0, label %bb2, label %bb8
ret void
}
-declare i32* @zot() #1
+declare ptr @zot() #1
declare i32 @fn()
; CHECK-NEXT: [[I_0:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[DO_BODY]] ]
; CHECK-NEXT: tail call void @goo(i64 [[I_0]], i64 [[I_0]])
; CHECK-NEXT: [[INC]] = add nuw i64 [[I_0]], 1
-; CHECK-NEXT: [[T0:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T0:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[T0]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[DO_BODY2_PREHEADER:%.*]], label [[DO_BODY]]
; CHECK: do.body2.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INC]], [[I_1]]
; CHECK-NEXT: tail call void @goo(i64 [[I_1]], i64 [[TMP0]])
; CHECK-NEXT: [[INC3]] = add nuw i64 [[I_1]], 1
-; CHECK-NEXT: [[T1:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T1:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL6:%.*]] = icmp eq i64 [[T1]], 0
; CHECK-NEXT: br i1 [[TOBOOL6]], label [[DO_BODY8_PREHEADER:%.*]], label [[DO_BODY2]]
; CHECK: do.body8.preheader:
; CHECK-NEXT: tail call void @goo(i64 [[I_2]], i64 [[J_2]])
; CHECK-NEXT: [[INC9]] = add nuw nsw i64 [[I_2]], 1
; CHECK-NEXT: [[INC10]] = add i64 [[J_2]], 1
-; CHECK-NEXT: [[T2:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T2:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL12:%.*]] = icmp eq i64 [[T2]], 0
; CHECK-NEXT: br i1 [[TOBOOL12]], label [[DO_BODY14_PREHEADER:%.*]], label [[DO_BODY8]]
; CHECK: do.body14.preheader:
; CHECK-NEXT: tail call void @goo(i64 [[I_3]], i64 [[J_3]])
; CHECK-NEXT: [[INC15]] = add nuw nsw i64 [[I_3]], 1
; CHECK-NEXT: [[INC16]] = add i64 [[J_3]], 1
-; CHECK-NEXT: [[T3:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T3:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL18:%.*]] = icmp eq i64 [[T3]], 0
; CHECK-NEXT: br i1 [[TOBOOL18]], label [[DO_BODY20_PREHEADER:%.*]], label [[DO_BODY14]]
; CHECK: do.body20.preheader:
; CHECK-NEXT: tail call void @goo(i64 [[I_4]], i64 [[J_4]])
; CHECK-NEXT: [[INC21]] = add nuw nsw i64 [[I_4]], 1
; CHECK-NEXT: [[INC22]] = add i64 [[J_4]], 1
-; CHECK-NEXT: [[T4:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T4:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL24:%.*]] = icmp eq i64 [[T4]], 0
; CHECK-NEXT: br i1 [[TOBOOL24]], label [[DO_BODY26_PREHEADER:%.*]], label [[DO_BODY20]]
; CHECK: do.body26.preheader:
; CHECK-NEXT: tail call void @goo(i64 [[I_5]], i64 [[J_5]])
; CHECK-NEXT: [[INC27]] = add nuw nsw i64 [[I_5]], 1
; CHECK-NEXT: [[INC28]] = add nsw i64 [[J_5]], 1
-; CHECK-NEXT: [[T5:%.*]] = load i64, i64* @cond, align 8
+; CHECK-NEXT: [[T5:%.*]] = load i64, ptr @cond, align 8
; CHECK-NEXT: [[TOBOOL30:%.*]] = icmp eq i64 [[T5]], 0
; CHECK-NEXT: br i1 [[TOBOOL30]], label [[DO_END31:%.*]], label [[DO_BODY26]]
; CHECK: do.end31:
%i.0 = phi i64 [ 0, %entry ], [ %inc, %do.body ]
tail call void @goo(i64 %i.0, i64 %i.0)
%inc = add nuw nsw i64 %i.0, 1
- %t0 = load i64, i64* @cond, align 8
+ %t0 = load i64, ptr @cond, align 8
%tobool = icmp eq i64 %t0, 0
br i1 %tobool, label %do.body2.preheader, label %do.body
tail call void @goo(i64 %i.1, i64 %j.1)
%inc3 = add nuw nsw i64 %i.1, 1
%inc4 = add nsw i64 %j.1, 1
- %t1 = load i64, i64* @cond, align 8
+ %t1 = load i64, ptr @cond, align 8
%tobool6 = icmp eq i64 %t1, 0
br i1 %tobool6, label %do.body8.preheader, label %do.body2
tail call void @goo(i64 %i.2, i64 %j.2)
%inc9 = add nuw nsw i64 %i.2, 1
%inc10 = add nsw i64 %j.2, 1
- %t2 = load i64, i64* @cond, align 8
+ %t2 = load i64, ptr @cond, align 8
%tobool12 = icmp eq i64 %t2, 0
br i1 %tobool12, label %do.body14.preheader, label %do.body8
tail call void @goo(i64 %i.3, i64 %j.3)
%inc15 = add nuw nsw i64 %i.3, 1
%inc16 = add nsw i64 %j.3, 1
- %t3 = load i64, i64* @cond, align 8
+ %t3 = load i64, ptr @cond, align 8
%tobool18 = icmp eq i64 %t3, 0
br i1 %tobool18, label %do.body20.preheader, label %do.body14
tail call void @goo(i64 %i.4, i64 %j.4)
%inc21 = add nuw nsw i64 %i.4, 1
%inc22 = add nsw i64 %j.4, 1
- %t4 = load i64, i64* @cond, align 8
+ %t4 = load i64, ptr @cond, align 8
%tobool24 = icmp eq i64 %t4, 0
br i1 %tobool24, label %do.body26.preheader, label %do.body20
tail call void @goo(i64 %i.5, i64 %j.5)
%inc27 = add nuw nsw i64 %i.5, 1
%inc28 = add nsw i64 %j.5, 1
- %t5 = load i64, i64* @cond, align 8
+ %t5 = load i64, ptr @cond, align 8
%tobool30 = icmp eq i64 %t5, 0
br i1 %tobool30, label %do.end31, label %do.body26
for.cond: ; preds = %if.end3, %entry
%g.0 = phi i32 [ undef, %entry ], [ %inc, %if.end3 ]
- %0 = load i32, i32* @f, align 4
+ %0 = load i32, ptr @f, align 4
%tobool.not = icmp eq i32 %0, 0
br i1 %tobool.not, label %if.end3, label %if.then
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-define dso_local void @foo(i8* nocapture %p) local_unnamed_addr !dbg !7 {
+define dso_local void @foo(ptr nocapture %p) local_unnamed_addr !dbg !7 {
; CHECK-LABEL: @foo(
entry:
- call void @llvm.dbg.value(metadata i8* %p, metadata !13, metadata !DIExpression()), !dbg !16
+ call void @llvm.dbg.value(metadata ptr %p, metadata !13, metadata !DIExpression()), !dbg !16
call void @llvm.dbg.value(metadata i8 0, metadata !14, metadata !DIExpression()), !dbg !17
br label %for.body, !dbg !18
for.body: ; preds = %entry, %for.body
; CHECK-LABEL: for.body:
%i.06 = phi i8 [ 0, %entry ], [ %inc, %for.body ]
- %p.addr.05 = phi i8* [ %p, %entry ], [ %add.ptr, %for.body ]
+ %p.addr.05 = phi ptr [ %p, %entry ], [ %add.ptr, %for.body ]
call void @llvm.dbg.value(metadata i8 %i.06, metadata !14, metadata !DIExpression()), !dbg !17
- call void @llvm.dbg.value(metadata i8* %p.addr.05, metadata !13, metadata !DIExpression()), !dbg !16
-; CHECK-NOT: call void @llvm.dbg.value(metadata i8* undef
-; CHECK: all void @llvm.dbg.value(metadata i8* %lsr.iv, metadata ![[MID_p:[0-9]+]], metadata !DIExpression(DW_OP_constu, 3, DW_OP_minus, DW_OP_stack_value))
- %add.ptr = getelementptr inbounds i8, i8* %p.addr.05, i64 3, !dbg !20
- call void @llvm.dbg.value(metadata i8* %add.ptr, metadata !13, metadata !DIExpression()), !dbg !16
-; CHECK-NOT: call void @llvm.dbg.value(metadata i8* undef
-; CHECK: call void @llvm.dbg.value(metadata i8* %lsr.iv, metadata ![[MID_p]], metadata !DIExpression())
- store i8 %i.06, i8* %add.ptr, align 1, !dbg !23, !tbaa !24
+ call void @llvm.dbg.value(metadata ptr %p.addr.05, metadata !13, metadata !DIExpression()), !dbg !16
+; CHECK-NOT: call void @llvm.dbg.value(metadata ptr undef
+; CHECK: call void @llvm.dbg.value(metadata ptr %lsr.iv, metadata ![[MID_p:[0-9]+]], metadata !DIExpression(DW_OP_constu, 3, DW_OP_minus, DW_OP_stack_value))
+ %add.ptr = getelementptr inbounds i8, ptr %p.addr.05, i64 3, !dbg !20
+ call void @llvm.dbg.value(metadata ptr %add.ptr, metadata !13, metadata !DIExpression()), !dbg !16
+; CHECK-NOT: call void @llvm.dbg.value(metadata ptr undef
+; CHECK: call void @llvm.dbg.value(metadata ptr %lsr.iv, metadata ![[MID_p]], metadata !DIExpression())
+ store i8 %i.06, ptr %add.ptr, align 1, !dbg !23, !tbaa !24
%inc = add nuw nsw i8 %i.06, 1, !dbg !27
call void @llvm.dbg.value(metadata i8 %inc, metadata !14, metadata !DIExpression()), !dbg !17
%exitcond.not = icmp eq i8 %inc, 32, !dbg !28
br label %for.body.for.body_crit_edge
for.body.for.body_crit_edge: ; preds = %for.body
- %.pre8 = load i128, i128* undef, align 16, !dbg !19, !tbaa !37
+ %.pre8 = load i128, ptr undef, align 16, !dbg !19, !tbaa !37
br label %for.body, !dbg !17
}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local void @_Z21mul_pow_of_2_to_shiftjPj(i32 %size, i32* nocapture %data) local_unnamed_addr !dbg !7 {
+define dso_local void @_Z21mul_pow_of_2_to_shiftjPj(i32 %size, ptr nocapture %data) local_unnamed_addr !dbg !7 {
entry:
call void @llvm.dbg.value(metadata i32 %size, metadata !12, metadata !DIExpression()), !dbg !13
- call void @llvm.dbg.value(metadata i32* %data, metadata !14, metadata !DIExpression()), !dbg !13
+ call void @llvm.dbg.value(metadata ptr %data, metadata !14, metadata !DIExpression()), !dbg !13
call void @llvm.dbg.value(metadata i32 0, metadata !15, metadata !DIExpression()), !dbg !13
%cmp4.not = icmp eq i32 %size, 0, !dbg !13
br i1 %cmp4.not, label %while.end, label %while.body.preheader, !dbg !13
%0 = trunc i64 %indvars.iv to i32, !dbg !16
%mul = shl i32 %0, 3, !dbg !16
call void @llvm.dbg.value(metadata i32 %mul, metadata !18, metadata !DIExpression()), !dbg !16
- %arrayidx = getelementptr inbounds i32, i32* %data, i64 %indvars.iv, !dbg !16
- store i32 %mul, i32* %arrayidx, align 4, !dbg !16
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv, !dbg !16
+ store i32 %mul, ptr %arrayidx, align 4, !dbg !16
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !16
call void @llvm.dbg.value(metadata i64 %indvars.iv.next, metadata !15, metadata !DIExpression()), !dbg !13
%exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count, !dbg !13
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local void @_Z15mul_to_additionjjPj(i32 %k, i32 %size, i32* nocapture %data) local_unnamed_addr !dbg !7 {
+define dso_local void @_Z15mul_to_additionjjPj(i32 %k, i32 %size, ptr nocapture %data) local_unnamed_addr !dbg !7 {
entry:
call void @llvm.dbg.value(metadata i32 %k, metadata !13, metadata !DIExpression()), !dbg !14
call void @llvm.dbg.value(metadata i32 %size, metadata !15, metadata !DIExpression()), !dbg !14
- call void @llvm.dbg.value(metadata i32* %data, metadata !16, metadata !DIExpression()), !dbg !14
+ call void @llvm.dbg.value(metadata ptr %data, metadata !16, metadata !DIExpression()), !dbg !14
call void @llvm.dbg.value(metadata i32 0, metadata !17, metadata !DIExpression()), !dbg !14
br label %while.cond, !dbg !14
%add = add i32 %mul, %k, !dbg !19
call void @llvm.dbg.value(metadata i32 %add, metadata !21, metadata !DIExpression()), !dbg !19
%idxprom = zext i32 %i.0 to i64, !dbg !19
- %arrayidx = getelementptr inbounds i32, i32* %data, i64 %idxprom, !dbg !19
- store i32 %add, i32* %arrayidx, align 4, !dbg !19
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %idxprom, !dbg !19
+ store i32 %add, ptr %arrayidx, align 4, !dbg !19
%add1 = add nuw nsw i32 %i.0, 1, !dbg !19
call void @llvm.dbg.value(metadata i32 %add1, metadata !17, metadata !DIExpression()), !dbg !14
br label %while.cond, !dbg !14, !llvm.loop !22
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local void @_Z16basic_recurrencejjPj(i32 %k, i32 %size, i32* nocapture %data) local_unnamed_addr !dbg !7 {
+define dso_local void @_Z16basic_recurrencejjPj(i32 %k, i32 %size, ptr nocapture %data) local_unnamed_addr !dbg !7 {
entry:
call void @llvm.dbg.value(metadata i32 %k, metadata !13, metadata !DIExpression()), !dbg !14
call void @llvm.dbg.value(metadata i32 %size, metadata !15, metadata !DIExpression()), !dbg !14
- call void @llvm.dbg.value(metadata i32* %data, metadata !16, metadata !DIExpression()), !dbg !14
+ call void @llvm.dbg.value(metadata ptr %data, metadata !16, metadata !DIExpression()), !dbg !14
br label %while.cond, !dbg !14
while.cond: ; preds = %while.body, %entry
%mul = mul i32 %i.0, %k, !dbg !18
call void @llvm.dbg.value(metadata i32 %mul, metadata !20, metadata !DIExpression()), !dbg !18
%idxprom = zext i32 %i.0 to i64, !dbg !18
- %arrayidx = getelementptr inbounds i32, i32* %data, i64 %idxprom, !dbg !18
- store i32 %mul, i32* %arrayidx, align 4, !dbg !18
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %idxprom, !dbg !18
+ store i32 %mul, ptr %arrayidx, align 4, !dbg !18
%inc = add nuw i32 %i.0, 1, !dbg !18
call void @llvm.dbg.value(metadata i32 %inc, metadata !17, metadata !DIExpression()), !dbg !14
br label %while.cond, !dbg !14, !llvm.loop !21
@__const.main.data = private unnamed_addr constant [16 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15], align 16
-define dso_local void @_Z21mul_pow_of_2_to_shiftjPjj(i32 %size, i32* nocapture %data, i32 %multiplicand) local_unnamed_addr !dbg !7 {
+define dso_local void @_Z21mul_pow_of_2_to_shiftjPjj(i32 %size, ptr nocapture %data, i32 %multiplicand) local_unnamed_addr !dbg !7 {
entry:
call void @llvm.dbg.value(metadata i32 %size, metadata !12, metadata !DIExpression()), !dbg !13
- call void @llvm.dbg.value(metadata i32* %data, metadata !14, metadata !DIExpression()), !dbg !13
+ call void @llvm.dbg.value(metadata ptr %data, metadata !14, metadata !DIExpression()), !dbg !13
call void @llvm.dbg.value(metadata i32 %multiplicand, metadata !15, metadata !DIExpression()), !dbg !13
call void @llvm.dbg.value(metadata i32 0, metadata !16, metadata !DIExpression()), !dbg !13
br label %while.cond, !dbg !13
%mul = mul i32 %i.0, %multiplicand, !dbg !17
call void @llvm.dbg.value(metadata i32 %mul, metadata !19, metadata !DIExpression()), !dbg !17
%idxprom = zext i32 %i.0 to i64, !dbg !17
- %arrayidx = getelementptr inbounds i32, i32* %data, i64 %idxprom, !dbg !17
- store i32 %mul, i32* %arrayidx, align 4, !dbg !17
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %idxprom, !dbg !17
+ store i32 %mul, ptr %arrayidx, align 4, !dbg !17
%inc = add nuw i32 %i.0, 1, !dbg !17
call void @llvm.dbg.value(metadata i32 %inc, metadata !16, metadata !DIExpression()), !dbg !13
br label %while.cond, !dbg !13, !llvm.loop !20
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local void @_Z9zext_scevPljs(i64* nocapture %arr, i32 %factor0, i16 signext %factor1) local_unnamed_addr !dbg !90 {
+define dso_local void @_Z9zext_scevPljs(ptr nocapture %arr, i32 %factor0, i16 signext %factor1) local_unnamed_addr !dbg !90 {
entry:
- call void @llvm.dbg.value(metadata i64* %arr, metadata !94, metadata !DIExpression()), !dbg !95
+ call void @llvm.dbg.value(metadata ptr %arr, metadata !94, metadata !DIExpression()), !dbg !95
call void @llvm.dbg.value(metadata i32 %factor0, metadata !96, metadata !DIExpression()), !dbg !95
call void @llvm.dbg.value(metadata i16 %factor1, metadata !97, metadata !DIExpression()), !dbg !95
call void @llvm.dbg.value(metadata i32 0, metadata !98, metadata !DIExpression()), !dbg !95
call void @llvm.dbg.value(metadata i32 %sub, metadata !101, metadata !DIExpression()), !dbg !99
%conv2 = zext i32 %sub to i64, !dbg !99
%idxprom = zext i32 %i.04 to i64, !dbg !99
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom, !dbg !99
- store i64 %conv2, i64* %arrayidx, align 8, !dbg !99
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom, !dbg !99
+ store i64 %conv2, ptr %arrayidx, align 8, !dbg !99
%inc = add nuw nsw i32 %i.04, 1, !dbg !99
call void @llvm.dbg.value(metadata i32 %inc, metadata !98, metadata !DIExpression()), !dbg !95
%cmp = icmp ult i32 %inc, 63, !dbg !95
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local void @_Z15mul_to_additionjjjjPj(i32 %k, i32 %l, i32 %m, i32 %size, i32* nocapture %data) local_unnamed_addr #0 !dbg !7 {
+define dso_local void @_Z15mul_to_additionjjjjPj(i32 %k, i32 %l, i32 %m, i32 %size, ptr nocapture %data) local_unnamed_addr #0 !dbg !7 {
entry:
call void @llvm.dbg.value(metadata i32 %k, metadata !14, metadata !DIExpression()), !dbg !24
call void @llvm.dbg.value(metadata i32 %l, metadata !15, metadata !DIExpression()), !dbg !24
call void @llvm.dbg.value(metadata i32 %m, metadata !16, metadata !DIExpression()), !dbg !24
call void @llvm.dbg.value(metadata i32 %size, metadata !17, metadata !DIExpression()), !dbg !24
- call void @llvm.dbg.value(metadata i32* %data, metadata !18, metadata !DIExpression()), !dbg !24
+ call void @llvm.dbg.value(metadata ptr %data, metadata !18, metadata !DIExpression()), !dbg !24
call void @llvm.dbg.value(metadata i32 0, metadata !19, metadata !DIExpression()), !dbg !24
%cmp9.not = icmp eq i32 %size, 0, !dbg !25
br i1 %cmp9.not, label %while.end, label %while.body.preheader, !dbg !26
call void @llvm.dbg.value(metadata !DIArgList(i32 %add, i32 %l), metadata !22, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_mul, DW_OP_stack_value)), !dbg !29
call void @llvm.dbg.value(metadata !DIArgList(i32 %add, i32 %m, i32 %l), metadata !23, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 2, DW_OP_mul, DW_OP_LLVM_arg, 1, DW_OP_shl, DW_OP_stack_value)), !dbg !29
call void @llvm.dbg.value(metadata !DIArgList(i32 %m, i32 %add, i32 %l), metadata !23, metadata !DIExpression(DW_OP_LLVM_arg, 1, DW_OP_LLVM_arg, 2, DW_OP_mul, DW_OP_LLVM_arg, 0, DW_OP_shl, DW_OP_stack_value)), !dbg !29
- %arrayidx = getelementptr inbounds i32, i32* %data, i64 %indvars.iv, !dbg !30
- store i32 %add, i32* %arrayidx, align 4, !dbg !31
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv, !dbg !30
+ store i32 %add, ptr %arrayidx, align 4, !dbg !31
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
call void @llvm.dbg.value(metadata i64 %indvars.iv.next, metadata !19, metadata !DIExpression()), !dbg !24
%exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count, !dbg !25
; RUN: opt -loop-reduce < %s
; we used to crash on this one
-declare i8* @_Znwm()
+declare ptr @_Znwm()
declare i32 @__gxx_personality_v0(...)
declare void @g()
; NOTE(review): diff-formatted test excerpt ('-' = old typed-pointer IR,
; '+' = opaque-pointer form). Per the RUN/comment lines above this region,
; this is a loop-reduce crash reproducer. Truncated: %v0 and label %bb2 are
; referenced but not defined in the visible lines -- treat as read-only.
-define void @f() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @f() personality ptr @__gxx_personality_v0 {
bb0:
br label %bb1
bb1:
%v2 = icmp eq i64 %v0, 0
br i1 %v2, label %bb6, label %bb3
bb3:
; Allocation via operator new (_Znwm); unwinds to the cleanup pad in bb4.
- %v3 = invoke noalias i8* @_Znwm()
+ %v3 = invoke noalias ptr @_Znwm()
to label %bb5 unwind label %bb4
bb4:
- %v4 = landingpad { i8*, i32 }
+ %v4 = landingpad { ptr, i32 }
cleanup
br label %bb9
bb5:
; With opaque pointers the bitcast vanishes; the GEP indexes ptr-sized slots.
- %v5 = bitcast i8* %v3 to i32**
- %add.ptr.i = getelementptr inbounds i32*, i32** %v5, i64 %v0
+ %add.ptr.i = getelementptr inbounds ptr, ptr %v3, i64 %v0
br label %bb6
bb6:
- %v6 = phi i32** [ null, %bb2 ], [ %add.ptr.i, %bb5 ]
+ %v6 = phi ptr [ null, %bb2 ], [ %add.ptr.i, %bb5 ]
invoke void @g()
to label %bb7 unwind label %bb8
bb7:
unreachable
bb8:
- %v7 = landingpad { i8*, i32 }
+ %v7 = landingpad { ptr, i32 }
cleanup
br label %bb9
bb9:
- resume { i8*, i32 } zeroinitializer
+ resume { ptr, i32 } zeroinitializer
}
; NOTE(review): diff-formatted test excerpt ('-' = old typed-pointer IR,
; '+' = opaque-pointer form). An array-constructor-style loop: bb2<->bb3
; advance a pointer by 1 each iteration under optsize invokes, with
; cleanup-only landingpads on every unwind edge.
-define void @h() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @h() personality ptr @__gxx_personality_v0 {
bb1:
invoke void @g() optsize
to label %bb2 unwind label %bb5
bb2:
; Cursor starts at undef and is bumped by bb3 on each trip around the loop.
- %arrayctor.cur = phi i8* [ undef, %bb1 ], [ %arrayctor.next, %bb3 ]
+ %arrayctor.cur = phi ptr [ undef, %bb1 ], [ %arrayctor.next, %bb3 ]
invoke void @g() optsize
to label %bb3 unwind label %bb6
bb3:
- %arrayctor.next = getelementptr inbounds i8, i8* %arrayctor.cur, i64 1
+ %arrayctor.next = getelementptr inbounds i8, ptr %arrayctor.cur, i64 1
br label %bb2
bb4:
ret void
bb5:
- %tmp = landingpad { i8*, i32 }
+ %tmp = landingpad { ptr, i32 }
cleanup
invoke void @g() optsize
to label %bb4 unwind label %bb7
bb6:
- %tmp1 = landingpad { i8*, i32 }
+ %tmp1 = landingpad { ptr, i32 }
cleanup
; Cleanup path compares the constructor cursor against undef before returning.
- %arraydestroy.isempty = icmp eq i8* undef, %arrayctor.cur
+ %arraydestroy.isempty = icmp eq ptr undef, %arrayctor.cur
ret void
bb7:
- %lpad.nonloopexit = landingpad { i8*, i32 }
- catch i8* null
+ %lpad.nonloopexit = landingpad { ptr, i32 }
+ catch ptr null
ret void
}
br label %while.cond
while.cond: ; preds = %while.cond, %entry
- %c.0 = phi i16* [ undef, %entry ], [ %incdec.ptr, %while.cond ]
- %incdec.ptr = getelementptr inbounds i16, i16* %c.0, i64 1
+ %c.0 = phi ptr [ undef, %entry ], [ %incdec.ptr, %while.cond ]
+ %incdec.ptr = getelementptr inbounds i16, ptr %c.0, i64 1
br i1 undef, label %while.cond1, label %while.cond
while.cond1: ; preds = %while.cond1, %while.cond
- %c.1 = phi i16* [ %incdec.ptr5, %while.cond1 ], [ %c.0, %while.cond ]
- %incdec.ptr5 = getelementptr inbounds i16, i16* %c.1, i64 1
+ %c.1 = phi ptr [ %incdec.ptr5, %while.cond1 ], [ %c.0, %while.cond ]
+ %incdec.ptr5 = getelementptr inbounds i16, ptr %c.1, i64 1
br i1 undef, label %while.cond7, label %while.cond1
while.cond7: ; preds = %while.cond7, %while.cond1
- %0 = phi i16* [ %incdec.ptr10, %while.cond7 ], [ %c.1, %while.cond1 ]
- %incdec.ptr10 = getelementptr inbounds i16, i16* %0, i64 1
+ %0 = phi ptr [ %incdec.ptr10, %while.cond7 ], [ %c.1, %while.cond1 ]
+ %incdec.ptr10 = getelementptr inbounds i16, ptr %0, i64 1
br i1 undef, label %while.cond12.preheader, label %while.cond7
while.cond12.preheader: ; preds = %while.cond7
br i1 undef, label %while.end16, label %while.body13.lr.ph
while.body13: ; preds = %if.else, %while.body13.lr.ph
- %1 = phi i16* [ %2, %while.body13.lr.ph ], [ %incdec.ptr15, %if.else ]
+ %1 = phi ptr [ %2, %while.body13.lr.ph ], [ %incdec.ptr15, %if.else ]
br i1 undef, label %while.cond12.outer.loopexit, label %if.else
while.cond12.outer.loopexit: ; preds = %while.body13
br i1 undef, label %while.end16, label %while.body13.lr.ph
while.body13.lr.ph: ; preds = %while.cond12.outer.loopexit, %while.cond12.preheader
- %2 = phi i16* [ %1, %while.cond12.outer.loopexit ], [ undef, %while.cond12.preheader ]
+ %2 = phi ptr [ %1, %while.cond12.outer.loopexit ], [ undef, %while.cond12.preheader ]
br label %while.body13
if.else: ; preds = %while.body13
- %incdec.ptr15 = getelementptr inbounds i16, i16* %1, i64 1
- %cmp = icmp eq i16* %incdec.ptr15, %0
+ %incdec.ptr15 = getelementptr inbounds i16, ptr %1, i64 1
+ %cmp = icmp eq ptr %incdec.ptr15, %0
br i1 %cmp, label %while.end16, label %while.body13
while.end16: ; preds = %if.else, %while.cond12.outer.loopexit, %while.cond12.preheader
; The setlt wants to use a value that is incremented one more than the dominant
; IV. Don't insert the 1 outside the loop, preventing folding it into the add.
; NOTE(review): diff-formatted test excerpt ('-' = old typed-pointer IR,
; '+' = opaque-pointer form). The loop zeroes nbeaux_[0..699], stepping the
; IV (%indvar630) by 1, while the exit test compares %inc.2 = IV + 2 against
; 701 and %inc.2 escapes through the final store -- exactly the
; "incremented one more than the dominant IV" situation described above.
-define void @test([700 x i32]* %nbeaux_.0__558, i32* %i_.16574) {
+define void @test(ptr %nbeaux_.0__558, ptr %i_.16574) {
then.0:
br label %no_exit.2
no_exit.2: ; preds = %no_exit.2, %then.0
%indvar630 = phi i32 [ 0, %then.0 ], [ %indvar.next631, %no_exit.2 ] ; <i32> [#uses=4]
%gep.upgrd.1 = zext i32 %indvar630 to i64 ; <i64> [#uses=1]
- %tmp.38 = getelementptr [700 x i32], [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp.38
+ %tmp.38 = getelementptr [700 x i32], ptr %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp.38
; %inc.2 = IV + 2 drives the exit test; %indvar.next631 = IV + 1 is the
; actual step, so the compared value is one ahead of the next IV.
%inc.2 = add i32 %indvar630, 2 ; <i32> [#uses=2]
%tmp.34 = icmp slt i32 %inc.2, 701 ; <i1> [#uses=1]
%indvar.next631 = add i32 %indvar630, 1 ; <i32> [#uses=1]
br i1 %tmp.34, label %no_exit.2, label %loopexit.2.loopexit
loopexit.2.loopexit: ; preds = %no_exit.2
- store i32 %inc.2, i32* %i_.16574
+ store i32 %inc.2, ptr %i_.16574
ret void
}
declare i1 @pred()
-define void @test1({ i32, i32 }* %P) {
+define void @test1(ptr %P) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=3]
- %gep1 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 0 ; <i32*> [#uses=1]
- store i32 0, i32* %gep1
- %gep2 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
- store i32 0, i32* %gep2
+ %gep1 = getelementptr { i32, i32 }, ptr %P, i32 %INDVAR, i32 0 ; <ptr> [#uses=1]
+ store i32 0, ptr %gep1
+ %gep2 = getelementptr { i32, i32 }, ptr %P, i32 %INDVAR, i32 1 ; <ptr> [#uses=1]
+ store i32 0, ptr %gep2
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br i1 %cond, label %Loop, label %Out
ret void
}
-define void @test2([2 x i32]* %P) {
+define void @test2(ptr %P) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=3]
- %gep1 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 0 ; <i32*> [#uses=1]
- store i32 0, i32* %gep1
- %gep2 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
- store i32 0, i32* %gep2
+ %gep1 = getelementptr [2 x i32], ptr %P, i32 %INDVAR, i64 0 ; <ptr> [#uses=1]
+ store i32 0, ptr %gep1
+ %gep2 = getelementptr [2 x i32], ptr %P, i32 %INDVAR, i64 1 ; <ptr> [#uses=1]
+ store i32 0, ptr %gep2
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br i1 %cond, label %Loop, label %Out
declare i1 @pred(i32)
-define void @test(i8* %PTR) {
+define void @test(ptr %PTR) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr i8, i8* %PTR, i32 %INDVAR ; <i8*> [#uses=1]
- store i8 0, i8* %STRRED
+ %STRRED = getelementptr i8, ptr %PTR, i32 %INDVAR ; <ptr> [#uses=1]
+ store i8 0, ptr %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=2]
;; cannot eliminate indvar
%cond = call i1 @pred( i32 %INDVAR2 ) ; <i1> [#uses=1]
; Declare i2 as legal so that IVUsers accepts to consider %indvar3451
target datalayout = "n2:8:16:32:64"
-define void @Fill_Buffer(i2* %p) nounwind {
+define void @Fill_Buffer(ptr %p) nounwind {
entry:
br label %bb8
bb8:
%indvar34 = phi i32 [ 0, %entry ], [ %indvar.next35, %bb8 ]
%indvar3451 = trunc i32 %indvar34 to i2
- %xmp4344 = load i2, i2* %p
+ %xmp4344 = load i2, ptr %p
%xmp104 = icmp eq i2 %indvar3451, %xmp4344
%indvar.next35 = add i32 %indvar34, 1
br i1 %xmp104, label %bb10, label %bb8
;
; RUN: opt -S -loop-reduce < %s | FileCheck %s
-define void @foo(float* %D, i32 %E) {
+define void @foo(ptr %D, i32 %E) {
entry:
br label %no_exit
no_exit: ; preds = %no_exit, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; <i32> [#uses=1]
- store volatile float 0.000000e+00, float* %D
+ store volatile float 0.000000e+00, ptr %D
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
; CHECK: icmp
; CHECK-NEXT: br i1
declare i32 @_except_handler3(...)
declare i32 @__CxxFrameHandler3(...)
-declare void @external(i32*)
+declare void @external(ptr)
declare void @reserve()
-define void @f() personality i32 (...)* @_except_handler3 {
+define void @f() personality ptr @_except_handler3 {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[THROW:%.*]]
; CHECK: loop_body:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[ITER:%.*]] ], [ 0, [[BLAH2]] ]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV]], -1
-; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to i8*
-; CHECK-NEXT: [[TMP100:%.*]] = icmp eq i8* [[LSR_IV_NEXT1]], null
+; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to ptr
+; CHECK-NEXT: [[TMP100:%.*]] = icmp eq ptr [[LSR_IV_NEXT1]], null
; CHECK-NEXT: br i1 [[TMP100]], label [[UNWIND_OUT:%.*]], label [[ITER]]
; CHECK: iter:
; CHECK-NEXT: br i1 true, label [[UNWIND_OUT]], label [[LOOP_BODY]]
br label %throw
throw: ; preds = %throw, %entry
- %tmp96 = getelementptr inbounds i8, i8* undef, i32 1
+ %tmp96 = getelementptr inbounds i8, ptr undef, i32 1
invoke void @reserve()
to label %throw unwind label %pad
pad: ; preds = %throw
- %phi2 = phi i8* [ %tmp96, %throw ]
+ %phi2 = phi ptr [ %tmp96, %throw ]
%cs = catchswitch within none [label %unreachable] unwind label %blah2
unreachable:
br label %loop_body
loop_body: ; preds = %iter, %pad
- %tmp99 = phi i8* [ %tmp101, %iter ], [ %phi2, %blah2 ]
- %tmp100 = icmp eq i8* %tmp99, undef
+ %tmp99 = phi ptr [ %tmp101, %iter ], [ %phi2, %blah2 ]
+ %tmp100 = icmp eq ptr %tmp99, undef
br i1 %tmp100, label %unwind_out, label %iter
iter: ; preds = %loop_body
- %tmp101 = getelementptr inbounds i8, i8* %tmp99, i32 1
+ %tmp101 = getelementptr inbounds i8, ptr %tmp99, i32 1
br i1 undef, label %unwind_out, label %loop_body
unwind_out: ; preds = %iter, %loop_body
cleanupret from %cleanuppadi4.i.i.i unwind to caller
}
; NOTE(review): diff-formatted test excerpt ('-' = old typed-pointer IR,
; '+' = opaque-pointer form). Truncated: handler label %blah and block
; %unwind_out are referenced but not visible here -- treat as read-only.
; The updated expectations below assert the pointer IV was rewritten as an
; i32 IV plus a single inttoptr before the null compare.
-define void @g() personality i32 (...)* @_except_handler3 {
+define void @g() personality ptr @_except_handler3 {
; CHECK-LABEL: @g(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[THROW:%.*]]
; CHECK: loop_body:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[ITER:%.*]] ], [ 0, [[BLAH:%.*]] ]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV]], -1
-; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to i8*
-; CHECK-NEXT: [[TMP100:%.*]] = icmp eq i8* [[LSR_IV_NEXT1]], null
+; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to ptr
+; CHECK-NEXT: [[TMP100:%.*]] = icmp eq ptr [[LSR_IV_NEXT1]], null
; CHECK-NEXT: br i1 [[TMP100]], label [[UNWIND_OUT:%.*]], label [[ITER]]
; CHECK: iter:
; CHECK-NEXT: br i1 true, label [[UNWIND_OUT]], label [[LOOP_BODY]]
br label %throw
throw: ; preds = %throw, %entry
- %tmp96 = getelementptr inbounds i8, i8* undef, i32 1
+ %tmp96 = getelementptr inbounds i8, ptr undef, i32 1
invoke void @reserve()
to label %throw unwind label %pad
pad:
- %phi2 = phi i8* [ %tmp96, %throw ]
+ %phi2 = phi ptr [ %tmp96, %throw ]
; catchswitch with two handlers, unwinding to the caller (no cleanup pad).
%cs = catchswitch within none [label %unreachable, label %blah] unwind to caller
unreachable:
ret void
loop_body: ; preds = %iter, %pad
- %tmp99 = phi i8* [ %tmp101, %iter ], [ %phi2, %blah ]
- %tmp100 = icmp eq i8* %tmp99, undef
+ %tmp99 = phi ptr [ %tmp101, %iter ], [ %phi2, %blah ]
+ %tmp100 = icmp eq ptr %tmp99, undef
br i1 %tmp100, label %unwind_out, label %iter
iter: ; preds = %loop_body
- %tmp101 = getelementptr inbounds i8, i8* %tmp99, i32 1
+ %tmp101 = getelementptr inbounds i8, ptr %tmp99, i32 1
br i1 undef, label %unwind_out, label %loop_body
}
; NOTE(review): diff view — unchanged context lines are elided. The %pad block
; (catchswitch defining %cs), the block ending in the bare 'unreachable' below,
; and %unwind_out are missing from this view; the function is not complete
; standalone IR here.
; @h: variant of the preceding tests where the loop's incoming pointer comes
; from a catchpad funclet (%blug, 'catchpad within %cs'); the CHECK lines show
; the pointer IV rewritten to an integer IV + inttoptr, as in @g.
-define void @h() personality i32 (...)* @_except_handler3 {
+define void @h() personality ptr @_except_handler3 {
; CHECK-LABEL: @h(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[THROW:%.*]]
; CHECK: loop_body:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[ITER:%.*]] ], [ 0, [[BLUG:%.*]] ]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV]], -1
-; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to i8*
-; CHECK-NEXT: [[TMP100:%.*]] = icmp eq i8* [[LSR_IV_NEXT1]], null
+; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to ptr
+; CHECK-NEXT: [[TMP100:%.*]] = icmp eq ptr [[LSR_IV_NEXT1]], null
; CHECK-NEXT: br i1 [[TMP100]], label [[UNWIND_OUT:%.*]], label [[ITER]]
; CHECK: iter:
; CHECK-NEXT: br i1 true, label [[UNWIND_OUT]], label [[LOOP_BODY]]
br label %throw
throw: ; preds = %throw, %entry
- %tmp96 = getelementptr inbounds i8, i8* undef, i32 1
+ %tmp96 = getelementptr inbounds i8, ptr undef, i32 1
invoke void @reserve()
to label %throw unwind label %pad
unreachable
; %blug is a catchpad funclet; %cs (its parent catchswitch, in the elided %pad
; block) is not visible in this view.
blug:
- %phi2 = phi i8* [ %tmp96, %pad ]
+ %phi2 = phi ptr [ %tmp96, %pad ]
%catchpad = catchpad within %cs []
br label %loop_body
ret void
loop_body: ; preds = %iter, %pad
- %tmp99 = phi i8* [ %tmp101, %iter ], [ %phi2, %blug ]
- %tmp100 = icmp eq i8* %tmp99, undef
+ %tmp99 = phi ptr [ %tmp101, %iter ], [ %phi2, %blug ]
+ %tmp100 = icmp eq ptr %tmp99, undef
br i1 %tmp100, label %unwind_out, label %iter
iter: ; preds = %loop_body
- %tmp101 = getelementptr inbounds i8, i8* %tmp99, i32 1
+ %tmp101 = getelementptr inbounds i8, ptr %tmp99, i32 1
br i1 undef, label %unwind_out, label %loop_body
}
; NOTE(review): diff view — unchanged context lines are elided. The %cleanuppad
; and %loop_head blocks referenced below are missing from this view; the
; function is not complete standalone IR here.
; @i: variant where the catchswitch (%cs, in block %catchpad) unwinds to a
; cleanuppad instead of to caller, and the loop's incoming pointer arrives via
; the elided %loop_head block. CHECK lines again show the pointer IV rewritten
; to an integer IV + inttoptr.
-define void @i() personality i32 (...)* @_except_handler3 {
+define void @i() personality ptr @_except_handler3 {
; CHECK-LABEL: @i(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[THROW:%.*]]
; CHECK: loop_body:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[ITER:%.*]] ], [ 0, [[LOOP_HEAD]] ]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV]], -1
-; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to i8*
-; CHECK-NEXT: [[TMP100:%.*]] = icmp eq i8* [[LSR_IV_NEXT1]], null
+; CHECK-NEXT: [[LSR_IV_NEXT1:%.*]] = inttoptr i32 [[LSR_IV_NEXT]] to ptr
+; CHECK-NEXT: [[TMP100:%.*]] = icmp eq ptr [[LSR_IV_NEXT1]], null
; CHECK-NEXT: br i1 [[TMP100]], label [[UNWIND_OUT:%.*]], label [[ITER]]
; CHECK: iter:
; CHECK-NEXT: br i1 true, label [[UNWIND_OUT]], label [[LOOP_BODY]]
br label %throw
throw: ; preds = %throw, %entry
- %tmp96 = getelementptr inbounds i8, i8* undef, i32 1
+ %tmp96 = getelementptr inbounds i8, ptr undef, i32 1
invoke void @reserve()
to label %throw unwind label %catchpad
catchpad: ; preds = %throw
- %phi2 = phi i8* [ %tmp96, %throw ]
+ %phi2 = phi ptr [ %tmp96, %throw ]
%cs = catchswitch within none [label %cp_body] unwind label %cleanuppad
cp_body:
br label %loop_body
; %loop_head (the loop_body predecessor carrying %phi2) is elided from this view.
loop_body: ; preds = %iter, %catchpad
- %tmp99 = phi i8* [ %tmp101, %iter ], [ %phi2, %loop_head ]
- %tmp100 = icmp eq i8* %tmp99, undef
+ %tmp99 = phi ptr [ %tmp101, %iter ], [ %phi2, %loop_head ]
+ %tmp100 = icmp eq ptr %tmp99, undef
br i1 %tmp100, label %unwind_out, label %iter
iter: ; preds = %loop_body
- %tmp101 = getelementptr inbounds i8, i8* %tmp99, i32 1
+ %tmp101 = getelementptr inbounds i8, ptr %tmp99, i32 1
br i1 undef, label %unwind_out, label %loop_body
unwind_out: ; preds = %iter, %loop_body
unreachable
}
; NOTE(review): diff view ('-'/'+' = typed-pointer -> opaque-pointer migration).
; @test1: Windows C++ EH (__CxxFrameHandler3) test. A loop invokes @external on
; a pointer that is bumped by one i32 per iteration (%d.0 / %incdec.ptr); its
; unwind edge reaches a catchswitch whose own unwind edge feeds a second
; catchswitch (%cs2) that unwinds to caller. The CHECK lines expect the loop
; and both catchswitch/catchpad funclets to survive structurally intact —
; presumably this guards against the pass under test sinking IV arithmetic
; into a funclet (confirm against the elided RUN line).
-define void @test1(i32* %b, i32* %c) personality i32 (...)* @__CxxFrameHandler3 {
+define void @test1(ptr %b, ptr %c) personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
-; CHECK-NEXT: [[D_0:%.*]] = phi i32* [ [[B:%.*]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: invoke void @external(i32* [[D_0]])
+; CHECK-NEXT: [[D_0:%.*]] = phi ptr [ [[B:%.*]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: invoke void @external(ptr [[D_0]])
; CHECK-NEXT: to label [[FOR_INC]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: for.inc:
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i32, i32* [[D_0]], i32 1
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i32, ptr [[D_0]], i32 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: catch.dispatch:
; CHECK-NEXT: [[CS:%.*]] = catchswitch within none [label %catch] unwind label [[CATCH_DISPATCH_2:%.*]]
; CHECK: catch:
-; CHECK-NEXT: [[TMP0:%.*]] = catchpad within [[CS]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[TMP0:%.*]] = catchpad within [[CS]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: catchret from [[TMP0]] to label [[TRY_CONT:%.*]]
; CHECK: try.cont:
-; CHECK-NEXT: invoke void @external(i32* [[C:%.*]])
+; CHECK-NEXT: invoke void @external(ptr [[C:%.*]])
; CHECK-NEXT: to label [[TRY_CONT_7:%.*]] unwind label [[CATCH_DISPATCH_2]]
; CHECK: catch.dispatch.2:
-; CHECK-NEXT: [[E_0:%.*]] = phi i32* [ [[C]], [[TRY_CONT]] ], [ [[B]], [[CATCH_DISPATCH]] ]
+; CHECK-NEXT: [[E_0:%.*]] = phi ptr [ [[C]], [[TRY_CONT]] ], [ [[B]], [[CATCH_DISPATCH]] ]
; CHECK-NEXT: [[CS2:%.*]] = catchswitch within none [label %catch.4] unwind to caller
; CHECK: catch.4:
-; CHECK-NEXT: [[TMP1:%.*]] = catchpad within [[CS2]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[TMP1:%.*]] = catchpad within [[CS2]] [ptr null, i32 64, ptr null]
; CHECK-NEXT: unreachable
; CHECK: try.cont.7:
; CHECK-NEXT: ret void
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %d.0 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.inc ]
- invoke void @external(i32* %d.0)
+ %d.0 = phi ptr [ %b, %entry ], [ %incdec.ptr, %for.inc ]
+ invoke void @external(ptr %d.0)
to label %for.inc unwind label %catch.dispatch
for.inc: ; preds = %for.cond
- %incdec.ptr = getelementptr inbounds i32, i32* %d.0, i32 1
+ %incdec.ptr = getelementptr inbounds i32, ptr %d.0, i32 1
br label %for.cond
catch.dispatch: ; preds = %for.cond
%cs = catchswitch within none [label %catch] unwind label %catch.dispatch.2
catch: ; preds = %catch.dispatch
- %0 = catchpad within %cs [i8* null, i32 64, i8* null]
+ %0 = catchpad within %cs [ptr null, i32 64, ptr null]
catchret from %0 to label %try.cont
try.cont: ; preds = %catch
- invoke void @external(i32* %c)
+ invoke void @external(ptr %c)
to label %try.cont.7 unwind label %catch.dispatch.2
; NOTE(review): the '%catchendblock' in the preds comment below does not match
; any visible label (likely stale from a pre-WinEH-v2 form of this test).
catch.dispatch.2: ; preds = %try.cont, %catchendblock
- %e.0 = phi i32* [ %c, %try.cont ], [ %b, %catch.dispatch ]
+ %e.0 = phi ptr [ %c, %try.cont ], [ %b, %catch.dispatch ]
%cs2 = catchswitch within none [label %catch.4] unwind to caller
catch.4: ; preds = %catch.dispatch.2
- catchpad within %cs2 [i8* null, i32 64, i8* null]
+ catchpad within %cs2 [ptr null, i32 64, ptr null]
unreachable
try.cont.7: ; preds = %try.cont
ret void
}
-define i32 @test2() personality i32 (...)* @_except_handler3 {
+define i32 @test2() personality ptr @_except_handler3 {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-NEXT: [[TMP18:%.*]] = catchswitch within none [label %catch.handler] unwind to caller
; CHECK: catch.handler:
; CHECK-NEXT: [[PHI_LCSSA:%.*]] = phi i32 [ [[PHI]], [[CATCH_DISPATCH]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = catchpad within [[TMP18]] [i8* null]
+; CHECK-NEXT: [[TMP19:%.*]] = catchpad within [[TMP18]] [ptr null]
; CHECK-NEXT: catchret from [[TMP19]] to label [[DONE:%.*]]
; CHECK: done:
; CHECK-NEXT: ret i32 [[PHI_LCSSA]]
catch.handler: ; preds = %catch.dispatch
%phi.lcssa = phi i32 [ %phi, %catch.dispatch ]
- %tmp19 = catchpad within %tmp18 [i8* null]
+ %tmp19 = catchpad within %tmp18 [ptr null]
catchret from %tmp19 to label %done
done:
; being processed many times. I will also naturally have a setupcost of
; 0xffffffff, which LSR will treat as invalid.
; CHECK-LABEL: func
-; CHECK: load i32, i32* %gep
+; CHECK: load i32, ptr %gep
-define i32 @func(i32* %in) {
+define i32 @func(ptr %in) {
entry:
- %load = load i32, i32* %in, align 4
+ %load = load i32, ptr %in, align 4
%a1 = add i32 %load, 1
%m1 = mul i32 %a1, %load
%a2 = add i32 %m1, 1
loop:
%lp = phi i32 [ %m31, %entry ], [ %linc, %loop ]
%0 = sext i32 %lp to i64
- %gep = getelementptr inbounds i32, i32* %in, i64 %0
- %loopload = load i32, i32* %gep, align 4
- store i32 0, i32* %gep, align 4
+ %gep = getelementptr inbounds i32, ptr %in, i64 %0
+ %loopload = load i32, ptr %gep, align 4
+ store i32 0, ptr %gep, align 4
%linc = add i32 %lp, 1
%lcmp = icmp eq i32 %linc, 100
br i1 %lcmp, label %exit, label %loop
; RUN: opt < %s -loop-reduce -verify
target triple = "x86_64-apple-darwin10"
-define void @myquicksort(i8* %a) nounwind ssp {
+define void @myquicksort(ptr %a) nounwind ssp {
entry:
br i1 undef, label %loop1, label %return
%indvar414 = phi i64 [ %indvar.next415, %loop2.backedge ], [ 0, %loop1 ]
%tmp473 = mul i64 %indvar414, -4
%tmp485 = add i64 %tmp484, %tmp473
- %storemerge4 = getelementptr i8, i8* %a, i64 %tmp485
- %0 = icmp ugt i8* %storemerge4, %a
+ %storemerge4 = getelementptr i8, ptr %a, i64 %tmp485
+ %0 = icmp ugt ptr %storemerge4, %a
br i1 false, label %loop2.exit, label %loop2.backedge
loop2.backedge: ; preds = %loop2
; Check that the index of 'P[outer]' is pulled out of the loop.
; RUN: opt < %s -loop-reduce -S | \
-; RUN: not grep "getelementptr.*%outer.*%INDVAR"
+; RUN: not grep "getelementptr.*ptr %INDVAR"
target datalayout = "e-p:32:32:32-n8:16:32"
declare i1 @pred()
declare i32 @foo()
-define void @test([10000 x i32]* %P) {
+define void @test(ptr %P) {
; <label>:0
%outer = call i32 @foo( ) ; <i32> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
- store i32 0, i32* %STRRED
+ %STRRED = getelementptr [10000 x i32], ptr %P, i32 %outer, i32 %INDVAR ; <ptr> [#uses=1]
+ store i32 0, ptr %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br i1 %cond, label %Loop, label %Out
; Check that the index of 'P[outer]' is pulled out of the loop.
; RUN: opt < %s -loop-reduce -S | \
-; RUN: not grep "getelementptr.*%outer.*%INDVAR"
+; RUN: not grep "getelementptr.*ptr %INDVAR"
target datalayout = "e-p:32:32:32-n32"
declare i1 @pred()
-define void @test([10000 x i32]* %P, i32 %outer) {
+define void @test(ptr %P, i32 %outer) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
- store i32 0, i32* %STRRED
+ %STRRED = getelementptr [10000 x i32], ptr %P, i32 %outer, i32 %INDVAR ; <ptr> [#uses=1]
+ store i32 0, ptr %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br i1 %cond, label %Loop, label %Out
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
; Function Attrs: nounwind readnone uwtable
define dso_local i32 @foo(i32 %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) local_unnamed_addr #3 {
%tmp16 = alloca [100 x [100 x i32]], align 16
%tmp17 = alloca [100 x [100 x i32]], align 16
%tmp18 = alloca [100 x [100 x i32]], align 16
- %tmp19 = bitcast [100 x i32]* %tmp to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp19) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp19, i8 0, i64 400, i1 false)
- %tmp20 = bitcast [100 x i32]* %tmp7 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp20) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp20, i8 0, i64 400, i1 false)
- %tmp21 = bitcast [100 x i32]* %tmp8 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp21) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp21, i8 0, i64 400, i1 false)
- %tmp22 = bitcast [100 x [100 x i32]]* %tmp9 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp22) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp22, i8 0, i64 40000, i1 false)
- %tmp23 = bitcast [100 x i32]* %tmp10 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp23) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp23, i8 0, i64 400, i1 false)
- %tmp24 = bitcast [100 x [100 x i32]]* %tmp11 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp24) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp24, i8 0, i64 40000, i1 false)
- %tmp25 = bitcast [100 x i32]* %tmp12 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp25) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp25, i8 0, i64 400, i1 false)
- %tmp26 = bitcast [100 x i32]* %tmp13 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp26) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp26, i8 0, i64 400, i1 false)
- %tmp27 = bitcast [100 x [100 x i32]]* %tmp14 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp27) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp27, i8 0, i64 40000, i1 false)
- %tmp28 = bitcast [100 x i32]* %tmp15 to i8*
- call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %tmp28) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp28, i8 0, i64 400, i1 false)
- %tmp29 = bitcast [100 x [100 x i32]]* %tmp16 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp29) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp29, i8 0, i64 40000, i1 false)
- %tmp30 = bitcast [100 x [100 x i32]]* %tmp17 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp30) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp30, i8 0, i64 40000, i1 false)
- %tmp31 = bitcast [100 x [100 x i32]]* %tmp18 to i8*
- call void @llvm.lifetime.start.p0i8(i64 40000, i8* nonnull %tmp31) #4
- call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %tmp31, i8 0, i64 40000, i1 false)
- %tmp32 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 3
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp7) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp7, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp8) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp8, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp9) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp9, i8 0, i64 40000, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp10) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp10, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp11) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp11, i8 0, i64 40000, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp12) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp12, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp13) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp13, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp14) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp14, i8 0, i64 40000, i1 false)
+ call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp15) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp15, i8 0, i64 400, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp16) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp16, i8 0, i64 40000, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp17) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp17, i8 0, i64 40000, i1 false)
+ call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp18) #4
+ call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp18, i8 0, i64 40000, i1 false)
+ %tmp32 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 3
br label %bb33
bb33: ; preds = %bb33, %bb
%tmp35 = trunc i64 %tmp34 to i32
%tmp36 = add i32 %tmp35, 48
%tmp37 = urem i32 %tmp36, 101
- %tmp38 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp34
- store i32 %tmp37, i32* %tmp38, align 16
+ %tmp38 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp34
+ store i32 %tmp37, ptr %tmp38, align 16
%tmp39 = or i64 %tmp34, 1
%tmp40 = trunc i64 %tmp39 to i32
%tmp41 = sub i32 48, %tmp40
%tmp42 = urem i32 %tmp41, 101
- %tmp43 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp39
- store i32 %tmp42, i32* %tmp43, align 4
+ %tmp43 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp39
+ store i32 %tmp42, ptr %tmp43, align 4
%tmp44 = or i64 %tmp34, 2
%tmp45 = trunc i64 %tmp44 to i32
%tmp46 = add i32 %tmp45, 48
%tmp47 = urem i32 %tmp46, 101
- %tmp48 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp44
- store i32 %tmp47, i32* %tmp48, align 8
+ %tmp48 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp44
+ store i32 %tmp47, ptr %tmp48, align 8
%tmp49 = or i64 %tmp34, 3
%tmp50 = trunc i64 %tmp49 to i32
%tmp51 = sub i32 48, %tmp50
%tmp52 = urem i32 %tmp51, 101
- %tmp53 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp49
- store i32 %tmp52, i32* %tmp53, align 4
+ %tmp53 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp49
+ store i32 %tmp52, ptr %tmp53, align 4
%tmp54 = add nuw nsw i64 %tmp34, 4
%tmp55 = icmp eq i64 %tmp54, 100
br i1 %tmp55, label %bb56, label %bb33
bb56: ; preds = %bb33
- %tmp57 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 88, i64 91
+ %tmp57 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 88, i64 91
br label %bb58
bb58: ; preds = %bb58, %bb56
%tmp60 = trunc i64 %tmp59 to i32
%tmp61 = add i32 %tmp60, 83
%tmp62 = urem i32 %tmp61, 101
- %tmp63 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp59
- store i32 %tmp62, i32* %tmp63, align 16
+ %tmp63 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp59
+ store i32 %tmp62, ptr %tmp63, align 16
%tmp64 = or i64 %tmp59, 1
%tmp65 = trunc i64 %tmp64 to i32
%tmp66 = sub i32 83, %tmp65
%tmp67 = urem i32 %tmp66, 101
- %tmp68 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp64
- store i32 %tmp67, i32* %tmp68, align 4
+ %tmp68 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp64
+ store i32 %tmp67, ptr %tmp68, align 4
%tmp69 = or i64 %tmp59, 2
%tmp70 = trunc i64 %tmp69 to i32
%tmp71 = add i32 %tmp70, 83
%tmp72 = urem i32 %tmp71, 101
- %tmp73 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp69
- store i32 %tmp72, i32* %tmp73, align 8
+ %tmp73 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp69
+ store i32 %tmp72, ptr %tmp73, align 8
%tmp74 = or i64 %tmp59, 3
%tmp75 = trunc i64 %tmp74 to i32
%tmp76 = sub i32 83, %tmp75
%tmp77 = urem i32 %tmp76, 101
- %tmp78 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp74
- store i32 %tmp77, i32* %tmp78, align 4
+ %tmp78 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp74
+ store i32 %tmp77, ptr %tmp78, align 4
%tmp79 = add nuw nsw i64 %tmp59, 4
%tmp80 = icmp eq i64 %tmp79, 100
br i1 %tmp80, label %bb81, label %bb58
%tmp83 = trunc i64 %tmp82 to i32
%tmp84 = add i32 %tmp83, 15
%tmp85 = urem i32 %tmp84, 101
- %tmp86 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp82
- store i32 %tmp85, i32* %tmp86, align 16
+ %tmp86 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp82
+ store i32 %tmp85, ptr %tmp86, align 16
%tmp87 = or i64 %tmp82, 1
%tmp88 = trunc i64 %tmp87 to i32
%tmp89 = sub i32 15, %tmp88
%tmp90 = urem i32 %tmp89, 101
- %tmp91 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp87
- store i32 %tmp90, i32* %tmp91, align 4
+ %tmp91 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp87
+ store i32 %tmp90, ptr %tmp91, align 4
%tmp92 = or i64 %tmp82, 2
%tmp93 = trunc i64 %tmp92 to i32
%tmp94 = add i32 %tmp93, 15
%tmp95 = urem i32 %tmp94, 101
- %tmp96 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp92
- store i32 %tmp95, i32* %tmp96, align 8
+ %tmp96 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp92
+ store i32 %tmp95, ptr %tmp96, align 8
%tmp97 = or i64 %tmp82, 3
%tmp98 = trunc i64 %tmp97 to i32
%tmp99 = sub i32 15, %tmp98
%tmp100 = urem i32 %tmp99, 101
- %tmp101 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp97
- store i32 %tmp100, i32* %tmp101, align 4
+ %tmp101 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp97
+ store i32 %tmp100, ptr %tmp101, align 4
%tmp102 = add nuw nsw i64 %tmp82, 4
%tmp103 = icmp eq i64 %tmp102, 100
br i1 %tmp103, label %bb104, label %bb81
%tmp106 = trunc i64 %tmp105 to i32
%tmp107 = add i32 %tmp106, 60
%tmp108 = urem i32 %tmp107, 101
- %tmp109 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp105
- store i32 %tmp108, i32* %tmp109, align 16
+ %tmp109 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp105
+ store i32 %tmp108, ptr %tmp109, align 16
%tmp110 = or i64 %tmp105, 1
%tmp111 = trunc i64 %tmp110 to i32
%tmp112 = sub i32 60, %tmp111
%tmp113 = urem i32 %tmp112, 101
- %tmp114 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp110
- store i32 %tmp113, i32* %tmp114, align 4
+ %tmp114 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp110
+ store i32 %tmp113, ptr %tmp114, align 4
%tmp115 = or i64 %tmp105, 2
%tmp116 = trunc i64 %tmp115 to i32
%tmp117 = add i32 %tmp116, 60
%tmp118 = urem i32 %tmp117, 101
- %tmp119 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp115
- store i32 %tmp118, i32* %tmp119, align 8
+ %tmp119 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp115
+ store i32 %tmp118, ptr %tmp119, align 8
%tmp120 = or i64 %tmp105, 3
%tmp121 = trunc i64 %tmp120 to i32
%tmp122 = sub i32 60, %tmp121
%tmp123 = urem i32 %tmp122, 101
- %tmp124 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp120
- store i32 %tmp123, i32* %tmp124, align 4
+ %tmp124 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp120
+ store i32 %tmp123, ptr %tmp124, align 4
%tmp125 = add nuw nsw i64 %tmp105, 4
%tmp126 = icmp eq i64 %tmp125, 10000
br i1 %tmp126, label %bb127, label %bb104
%tmp129 = trunc i64 %tmp128 to i32
%tmp130 = add i32 %tmp129, 87
%tmp131 = urem i32 %tmp130, 101
- %tmp132 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp128
- store i32 %tmp131, i32* %tmp132, align 16
+ %tmp132 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp128
+ store i32 %tmp131, ptr %tmp132, align 16
%tmp133 = or i64 %tmp128, 1
%tmp134 = trunc i64 %tmp133 to i32
%tmp135 = sub i32 87, %tmp134
%tmp136 = urem i32 %tmp135, 101
- %tmp137 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp133
- store i32 %tmp136, i32* %tmp137, align 4
+ %tmp137 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp133
+ store i32 %tmp136, ptr %tmp137, align 4
%tmp138 = or i64 %tmp128, 2
%tmp139 = trunc i64 %tmp138 to i32
%tmp140 = add i32 %tmp139, 87
%tmp141 = urem i32 %tmp140, 101
- %tmp142 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp138
- store i32 %tmp141, i32* %tmp142, align 8
+ %tmp142 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp138
+ store i32 %tmp141, ptr %tmp142, align 8
%tmp143 = or i64 %tmp128, 3
%tmp144 = trunc i64 %tmp143 to i32
%tmp145 = sub i32 87, %tmp144
%tmp146 = urem i32 %tmp145, 101
- %tmp147 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp143
- store i32 %tmp146, i32* %tmp147, align 4
+ %tmp147 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp143
+ store i32 %tmp146, ptr %tmp147, align 4
%tmp148 = add nuw nsw i64 %tmp128, 4
%tmp149 = icmp eq i64 %tmp148, 100
br i1 %tmp149, label %bb150, label %bb127
%tmp152 = trunc i64 %tmp151 to i32
%tmp153 = add i32 %tmp152, 36
%tmp154 = urem i32 %tmp153, 101
- %tmp155 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp151
- store i32 %tmp154, i32* %tmp155, align 16
+ %tmp155 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp151
+ store i32 %tmp154, ptr %tmp155, align 16
%tmp156 = or i64 %tmp151, 1
%tmp157 = trunc i64 %tmp156 to i32
%tmp158 = sub i32 36, %tmp157
%tmp159 = urem i32 %tmp158, 101
- %tmp160 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp156
- store i32 %tmp159, i32* %tmp160, align 4
+ %tmp160 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp156
+ store i32 %tmp159, ptr %tmp160, align 4
%tmp161 = or i64 %tmp151, 2
%tmp162 = trunc i64 %tmp161 to i32
%tmp163 = add i32 %tmp162, 36
%tmp164 = urem i32 %tmp163, 101
- %tmp165 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp161
- store i32 %tmp164, i32* %tmp165, align 8
+ %tmp165 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp161
+ store i32 %tmp164, ptr %tmp165, align 8
%tmp166 = or i64 %tmp151, 3
%tmp167 = trunc i64 %tmp166 to i32
%tmp168 = sub i32 36, %tmp167
%tmp169 = urem i32 %tmp168, 101
- %tmp170 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp166
- store i32 %tmp169, i32* %tmp170, align 4
+ %tmp170 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp166
+ store i32 %tmp169, ptr %tmp170, align 4
%tmp171 = add nuw nsw i64 %tmp151, 4
%tmp172 = icmp eq i64 %tmp171, 10000
br i1 %tmp172, label %bb173, label %bb150
%tmp175 = trunc i64 %tmp174 to i32
%tmp176 = add i32 %tmp175, 27
%tmp177 = urem i32 %tmp176, 101
- %tmp178 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp174
- store i32 %tmp177, i32* %tmp178, align 16
+ %tmp178 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp174
+ store i32 %tmp177, ptr %tmp178, align 16
%tmp179 = or i64 %tmp174, 1
%tmp180 = trunc i64 %tmp179 to i32
%tmp181 = sub i32 27, %tmp180
%tmp182 = urem i32 %tmp181, 101
- %tmp183 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp179
- store i32 %tmp182, i32* %tmp183, align 4
+ %tmp183 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp179
+ store i32 %tmp182, ptr %tmp183, align 4
%tmp184 = or i64 %tmp174, 2
%tmp185 = trunc i64 %tmp184 to i32
%tmp186 = add i32 %tmp185, 27
%tmp187 = urem i32 %tmp186, 101
- %tmp188 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp184
- store i32 %tmp187, i32* %tmp188, align 8
+ %tmp188 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp184
+ store i32 %tmp187, ptr %tmp188, align 8
%tmp189 = or i64 %tmp174, 3
%tmp190 = trunc i64 %tmp189 to i32
%tmp191 = sub i32 27, %tmp190
%tmp192 = urem i32 %tmp191, 101
- %tmp193 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp189
- store i32 %tmp192, i32* %tmp193, align 4
+ %tmp193 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp189
+ store i32 %tmp192, ptr %tmp193, align 4
%tmp194 = add nuw nsw i64 %tmp174, 4
%tmp195 = icmp eq i64 %tmp194, 100
br i1 %tmp195, label %bb196, label %bb173
%tmp198 = trunc i64 %tmp197 to i32
%tmp199 = add i32 %tmp198, 40
%tmp200 = urem i32 %tmp199, 101
- %tmp201 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp197
- store i32 %tmp200, i32* %tmp201, align 16
+ %tmp201 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp197
+ store i32 %tmp200, ptr %tmp201, align 16
%tmp202 = or i64 %tmp197, 1
%tmp203 = trunc i64 %tmp202 to i32
%tmp204 = sub i32 40, %tmp203
%tmp205 = urem i32 %tmp204, 101
- %tmp206 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp202
- store i32 %tmp205, i32* %tmp206, align 4
+ %tmp206 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp202
+ store i32 %tmp205, ptr %tmp206, align 4
%tmp207 = or i64 %tmp197, 2
%tmp208 = trunc i64 %tmp207 to i32
%tmp209 = add i32 %tmp208, 40
%tmp210 = urem i32 %tmp209, 101
- %tmp211 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp207
- store i32 %tmp210, i32* %tmp211, align 8
+ %tmp211 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp207
+ store i32 %tmp210, ptr %tmp211, align 8
%tmp212 = or i64 %tmp197, 3
%tmp213 = trunc i64 %tmp212 to i32
%tmp214 = sub i32 40, %tmp213
%tmp215 = urem i32 %tmp214, 101
- %tmp216 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp212
- store i32 %tmp215, i32* %tmp216, align 4
+ %tmp216 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp212
+ store i32 %tmp215, ptr %tmp216, align 4
%tmp217 = add nuw nsw i64 %tmp197, 4
%tmp218 = icmp eq i64 %tmp217, 100
br i1 %tmp218, label %bb219, label %bb196
%tmp221 = trunc i64 %tmp220 to i32
%tmp222 = add i32 %tmp221, 84
%tmp223 = urem i32 %tmp222, 101
- %tmp224 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp220
- store i32 %tmp223, i32* %tmp224, align 16
+ %tmp224 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp220
+ store i32 %tmp223, ptr %tmp224, align 16
%tmp225 = or i64 %tmp220, 1
%tmp226 = trunc i64 %tmp225 to i32
%tmp227 = sub i32 84, %tmp226
%tmp228 = urem i32 %tmp227, 101
- %tmp229 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp225
- store i32 %tmp228, i32* %tmp229, align 4
+ %tmp229 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp225
+ store i32 %tmp228, ptr %tmp229, align 4
%tmp230 = or i64 %tmp220, 2
%tmp231 = trunc i64 %tmp230 to i32
%tmp232 = add i32 %tmp231, 84
%tmp233 = urem i32 %tmp232, 101
- %tmp234 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp230
- store i32 %tmp233, i32* %tmp234, align 8
+ %tmp234 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp230
+ store i32 %tmp233, ptr %tmp234, align 8
%tmp235 = or i64 %tmp220, 3
%tmp236 = trunc i64 %tmp235 to i32
%tmp237 = sub i32 84, %tmp236
%tmp238 = urem i32 %tmp237, 101
- %tmp239 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp235
- store i32 %tmp238, i32* %tmp239, align 4
+ %tmp239 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp235
+ store i32 %tmp238, ptr %tmp239, align 4
%tmp240 = add nuw nsw i64 %tmp220, 4
%tmp241 = icmp eq i64 %tmp240, 10000
br i1 %tmp241, label %bb242, label %bb219
%tmp244 = trunc i64 %tmp243 to i32
%tmp245 = add i32 %tmp244, 94
%tmp246 = urem i32 %tmp245, 101
- %tmp247 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp243
- store i32 %tmp246, i32* %tmp247, align 16
+ %tmp247 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp243
+ store i32 %tmp246, ptr %tmp247, align 16
%tmp248 = or i64 %tmp243, 1
%tmp249 = trunc i64 %tmp248 to i32
%tmp250 = sub i32 94, %tmp249
%tmp251 = urem i32 %tmp250, 101
- %tmp252 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp248
- store i32 %tmp251, i32* %tmp252, align 4
+ %tmp252 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp248
+ store i32 %tmp251, ptr %tmp252, align 4
%tmp253 = or i64 %tmp243, 2
%tmp254 = trunc i64 %tmp253 to i32
%tmp255 = add i32 %tmp254, 94
%tmp256 = urem i32 %tmp255, 101
- %tmp257 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp253
- store i32 %tmp256, i32* %tmp257, align 8
+ %tmp257 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp253
+ store i32 %tmp256, ptr %tmp257, align 8
%tmp258 = or i64 %tmp243, 3
%tmp259 = trunc i64 %tmp258 to i32
%tmp260 = sub i32 94, %tmp259
%tmp261 = urem i32 %tmp260, 101
- %tmp262 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp258
- store i32 %tmp261, i32* %tmp262, align 4
+ %tmp262 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp258
+ store i32 %tmp261, ptr %tmp262, align 4
%tmp263 = add nuw nsw i64 %tmp243, 4
%tmp264 = icmp eq i64 %tmp263, 100
br i1 %tmp264, label %bb265, label %bb242
%tmp267 = trunc i64 %tmp266 to i32
%tmp268 = add i32 %tmp267, 92
%tmp269 = urem i32 %tmp268, 101
- %tmp270 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp266
- store i32 %tmp269, i32* %tmp270, align 16
+ %tmp270 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp266
+ store i32 %tmp269, ptr %tmp270, align 16
%tmp271 = or i64 %tmp266, 1
%tmp272 = trunc i64 %tmp271 to i32
%tmp273 = sub i32 92, %tmp272
%tmp274 = urem i32 %tmp273, 101
- %tmp275 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp271
- store i32 %tmp274, i32* %tmp275, align 4
+ %tmp275 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp271
+ store i32 %tmp274, ptr %tmp275, align 4
%tmp276 = or i64 %tmp266, 2
%tmp277 = trunc i64 %tmp276 to i32
%tmp278 = add i32 %tmp277, 92
%tmp279 = urem i32 %tmp278, 101
- %tmp280 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp276
- store i32 %tmp279, i32* %tmp280, align 8
+ %tmp280 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp276
+ store i32 %tmp279, ptr %tmp280, align 8
%tmp281 = or i64 %tmp266, 3
%tmp282 = trunc i64 %tmp281 to i32
%tmp283 = sub i32 92, %tmp282
%tmp284 = urem i32 %tmp283, 101
- %tmp285 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp281
- store i32 %tmp284, i32* %tmp285, align 4
+ %tmp285 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp281
+ store i32 %tmp284, ptr %tmp285, align 4
%tmp286 = add nuw nsw i64 %tmp266, 4
%tmp287 = icmp eq i64 %tmp286, 10000
br i1 %tmp287, label %bb288, label %bb265
%tmp290 = trunc i64 %tmp289 to i32
%tmp291 = add i32 %tmp290, 87
%tmp292 = urem i32 %tmp291, 101
- %tmp293 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp289
- store i32 %tmp292, i32* %tmp293, align 16
+ %tmp293 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp289
+ store i32 %tmp292, ptr %tmp293, align 16
%tmp294 = or i64 %tmp289, 1
%tmp295 = trunc i64 %tmp294 to i32
%tmp296 = sub i32 87, %tmp295
%tmp297 = urem i32 %tmp296, 101
- %tmp298 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp294
- store i32 %tmp297, i32* %tmp298, align 4
+ %tmp298 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp294
+ store i32 %tmp297, ptr %tmp298, align 4
%tmp299 = or i64 %tmp289, 2
%tmp300 = trunc i64 %tmp299 to i32
%tmp301 = add i32 %tmp300, 87
%tmp302 = urem i32 %tmp301, 101
- %tmp303 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp299
- store i32 %tmp302, i32* %tmp303, align 8
+ %tmp303 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp299
+ store i32 %tmp302, ptr %tmp303, align 8
%tmp304 = or i64 %tmp289, 3
%tmp305 = trunc i64 %tmp304 to i32
%tmp306 = sub i32 87, %tmp305
%tmp307 = urem i32 %tmp306, 101
- %tmp308 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp304
- store i32 %tmp307, i32* %tmp308, align 4
+ %tmp308 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp304
+ store i32 %tmp307, ptr %tmp308, align 4
%tmp309 = add nuw nsw i64 %tmp289, 4
%tmp310 = icmp eq i64 %tmp309, 10000
br i1 %tmp310, label %bb311, label %bb288
%tmp313 = trunc i64 %tmp312 to i32
%tmp314 = add i32 %tmp313, 28
%tmp315 = urem i32 %tmp314, 101
- %tmp316 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp312
- store i32 %tmp315, i32* %tmp316, align 16
+ %tmp316 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp312
+ store i32 %tmp315, ptr %tmp316, align 16
%tmp317 = or i64 %tmp312, 1
%tmp318 = trunc i64 %tmp317 to i32
%tmp319 = sub i32 28, %tmp318
%tmp320 = urem i32 %tmp319, 101
- %tmp321 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp317
- store i32 %tmp320, i32* %tmp321, align 4
+ %tmp321 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp317
+ store i32 %tmp320, ptr %tmp321, align 4
%tmp322 = or i64 %tmp312, 2
%tmp323 = trunc i64 %tmp322 to i32
%tmp324 = add i32 %tmp323, 28
%tmp325 = urem i32 %tmp324, 101
- %tmp326 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp322
- store i32 %tmp325, i32* %tmp326, align 8
+ %tmp326 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp322
+ store i32 %tmp325, ptr %tmp326, align 8
%tmp327 = or i64 %tmp312, 3
%tmp328 = trunc i64 %tmp327 to i32
%tmp329 = sub i32 28, %tmp328
%tmp330 = urem i32 %tmp329, 101
- %tmp331 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp327
- store i32 %tmp330, i32* %tmp331, align 4
+ %tmp331 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp327
+ store i32 %tmp330, ptr %tmp331, align 4
%tmp332 = add nuw nsw i64 %tmp312, 4
%tmp333 = icmp eq i64 %tmp332, 10000
br i1 %tmp333, label %bb334, label %bb311
bb334: ; preds = %bb311
%tmp335 = sub i32 87, %arg
- %tmp336 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 69
- %tmp337 = load i32, i32* %tmp336, align 4
- %tmp338 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 68
- %tmp339 = load i32, i32* %tmp338, align 16
+ %tmp336 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 69
+ %tmp337 = load i32, ptr %tmp336, align 4
+ %tmp338 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 68
+ %tmp339 = load i32, ptr %tmp338, align 16
br label %bb340
bb340: ; preds = %bb340, %bb334
%tmp344 = phi i32 [ %tmp335, %bb334 ], [ %tmp382, %bb340 ]
%tmp345 = phi i32 [ %arg2, %bb334 ], [ %tmp380, %bb340 ]
%tmp346 = add nsw i64 %tmp343, -1
- %tmp347 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp346
- %tmp348 = load i32, i32* %tmp347, align 4
+ %tmp347 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp346
+ %tmp348 = load i32, ptr %tmp347, align 4
%tmp349 = add nuw nsw i64 %tmp343, 1
- %tmp350 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp349
+ %tmp350 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp349
%tmp351 = sub i32 %tmp342, %tmp348
- store i32 %tmp351, i32* %tmp350, align 4
- %tmp352 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp343
- %tmp353 = load i32, i32* %tmp352, align 4
- %tmp354 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp343
+ store i32 %tmp351, ptr %tmp350, align 4
+ %tmp352 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp343
+ %tmp353 = load i32, ptr %tmp352, align 4
+ %tmp354 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp343
%tmp355 = add i32 %tmp341, %tmp353
- store i32 %tmp355, i32* %tmp354, align 4
+ store i32 %tmp355, ptr %tmp354, align 4
%tmp356 = add i32 %tmp345, -1
%tmp357 = sub i32 %tmp344, %tmp345
%tmp358 = sub i32 %tmp357, %tmp351
%tmp359 = add nsw i64 %tmp343, -2
- %tmp360 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp359
- %tmp361 = load i32, i32* %tmp360, align 4
- %tmp362 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp343
+ %tmp360 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp359
+ %tmp361 = load i32, ptr %tmp360, align 4
+ %tmp362 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp343
%tmp363 = sub i32 %tmp355, %tmp361
- store i32 %tmp363, i32* %tmp362, align 4
- %tmp364 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp346
- %tmp365 = load i32, i32* %tmp364, align 4
- %tmp366 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp346
+ store i32 %tmp363, ptr %tmp362, align 4
+ %tmp364 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp346
+ %tmp365 = load i32, ptr %tmp364, align 4
+ %tmp366 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp346
%tmp367 = add i32 %tmp348, %tmp365
- store i32 %tmp367, i32* %tmp366, align 4
+ store i32 %tmp367, ptr %tmp366, align 4
%tmp368 = add i32 %tmp345, -2
%tmp369 = sub i32 %tmp358, %tmp356
%tmp370 = sub i32 %tmp369, %tmp363
%tmp371 = add nsw i64 %tmp343, -3
- %tmp372 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp371
- %tmp373 = load i32, i32* %tmp372, align 4
- %tmp374 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp346
+ %tmp372 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp371
+ %tmp373 = load i32, ptr %tmp372, align 4
+ %tmp374 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp346
%tmp375 = sub i32 %tmp367, %tmp373
- store i32 %tmp375, i32* %tmp374, align 4
- %tmp376 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp359
- %tmp377 = load i32, i32* %tmp376, align 4
- %tmp378 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp359
+ store i32 %tmp375, ptr %tmp374, align 4
+ %tmp376 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp359
+ %tmp377 = load i32, ptr %tmp376, align 4
+ %tmp378 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp359
%tmp379 = add i32 %tmp361, %tmp377
- store i32 %tmp379, i32* %tmp378, align 4
+ store i32 %tmp379, ptr %tmp378, align 4
%tmp380 = add i32 %tmp345, -3
%tmp381 = sub i32 %tmp370, %tmp368
%tmp382 = sub i32 %tmp381, %tmp375
bb384: ; preds = %bb340
%tmp385 = add i32 %arg2, -66
- %tmp386 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 52
- %tmp387 = load i32, i32* %tmp386, align 16
- store i32 %tmp387, i32* %tmp32, align 4
- %tmp388 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 97
- %tmp389 = load i32, i32* %tmp388, align 4
- %tmp390 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 31
- %tmp391 = load i32, i32* %tmp390, align 4
+ %tmp386 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 52
+ %tmp387 = load i32, ptr %tmp386, align 16
+ store i32 %tmp387, ptr %tmp32, align 4
+ %tmp388 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 97
+ %tmp389 = load i32, ptr %tmp388, align 4
+ %tmp390 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 31
+ %tmp391 = load i32, ptr %tmp390, align 4
%tmp392 = icmp eq i32 %tmp389, %tmp391
br i1 %tmp392, label %bb478, label %bb393
bb393: ; preds = %bb384
%tmp394 = sub i32 -79, %tmp382
- %tmp395 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 2
- %tmp396 = bitcast i32* %tmp395 to i8*
- %tmp397 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 2
- %tmp398 = bitcast i32* %tmp397 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 %tmp396, i8* nonnull align 8 %tmp398, i64 304, i1 false)
+ %tmp395 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 2
+ %tmp397 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 2
+ call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 %tmp395, ptr nonnull align 8 %tmp397, i64 304, i1 false)
br label %bb399
bb399: ; preds = %bb424, %bb393
bb403: ; preds = %bb403, %bb399
%tmp404 = phi i64 [ 1, %bb399 ], [ %tmp414, %bb403 ]
%tmp405 = add nuw nsw i64 %tmp404, 1
- %tmp406 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 %tmp404, i64 %tmp405
- %tmp407 = load i32, i32* %tmp406, align 4
+ %tmp406 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 %tmp404, i64 %tmp405
+ %tmp407 = load i32, ptr %tmp406, align 4
%tmp408 = add i32 %tmp394, %tmp407
- store i32 %tmp408, i32* %tmp406, align 4
- %tmp409 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 %tmp404, i64 %tmp405
- %tmp410 = load i32, i32* %tmp409, align 4
- %tmp411 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp405
- %tmp412 = load i32, i32* %tmp411, align 4
+ store i32 %tmp408, ptr %tmp406, align 4
+ %tmp409 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 %tmp404, i64 %tmp405
+ %tmp410 = load i32, ptr %tmp409, align 4
+ %tmp411 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp405
+ %tmp412 = load i32, ptr %tmp411, align 4
%tmp413 = add i32 %tmp412, %tmp410
- store i32 %tmp413, i32* %tmp411, align 4
+ store i32 %tmp413, ptr %tmp411, align 4
%tmp414 = add nuw nsw i64 %tmp404, 2
- %tmp415 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 %tmp405, i64 %tmp414
- %tmp416 = load i32, i32* %tmp415, align 4
+ %tmp415 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 %tmp405, i64 %tmp414
+ %tmp416 = load i32, ptr %tmp415, align 4
%tmp417 = add i32 %tmp394, %tmp416
- store i32 %tmp417, i32* %tmp415, align 4
- %tmp418 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 %tmp405, i64 %tmp414
- %tmp419 = load i32, i32* %tmp418, align 4
- %tmp420 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp414
- %tmp421 = load i32, i32* %tmp420, align 4
+ store i32 %tmp417, ptr %tmp415, align 4
+ %tmp418 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 %tmp405, i64 %tmp414
+ %tmp419 = load i32, ptr %tmp418, align 4
+ %tmp420 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp414
+ %tmp421 = load i32, ptr %tmp420, align 4
%tmp422 = add i32 %tmp421, %tmp419
- store i32 %tmp422, i32* %tmp420, align 4
+ store i32 %tmp422, ptr %tmp420, align 4
%tmp423 = icmp eq i64 %tmp414, 47
br i1 %tmp423, label %bb424, label %bb403
bb424: ; preds = %bb403
%tmp425 = add nsw i64 %tmp400, -1
- %tmp426 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp425
- %tmp427 = load i32, i32* %tmp426, align 4
+ %tmp426 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp425
+ %tmp427 = load i32, ptr %tmp426, align 4
%tmp428 = add i32 %tmp427, 2
- %tmp429 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp425
- %tmp430 = load i32, i32* %tmp429, align 4
+ %tmp429 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp425
+ %tmp430 = load i32, ptr %tmp429, align 4
%tmp431 = mul i32 %tmp430, %tmp428
- store i32 %tmp431, i32* %tmp429, align 4
+ store i32 %tmp431, ptr %tmp429, align 4
%tmp432 = icmp ugt i64 %tmp425, 1
br i1 %tmp432, label %bb399, label %bb401
%tmp435 = phi i32 [ 2, %bb401 ], [ %tmp476, %bb475 ]
%tmp436 = add nsw i64 %tmp434, -1
%tmp437 = add nuw nsw i64 %tmp434, 1
- %tmp438 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 %tmp437, i64 %tmp434
- %tmp439 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 %tmp436, i64 %tmp437
+ %tmp438 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 %tmp437, i64 %tmp434
+ %tmp439 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 %tmp436, i64 %tmp437
%tmp440 = mul i32 %tmp435, 47
br label %bb441
bb441: ; preds = %bb473, %bb433
%tmp442 = phi i64 [ 1, %bb433 ], [ %tmp450, %bb473 ]
- %tmp443 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp442
- %tmp444 = load i32, i32* %tmp443, align 4
+ %tmp443 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp442
+ %tmp444 = load i32, ptr %tmp443, align 4
%tmp445 = add nsw i64 %tmp442, -1
- %tmp446 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp445
- %tmp447 = load i32, i32* %tmp446, align 4
+ %tmp446 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp445
+ %tmp447 = load i32, ptr %tmp446, align 4
%tmp448 = xor i32 %tmp444, -1
%tmp449 = add i32 %tmp447, %tmp448
- store i32 %tmp449, i32* %tmp446, align 4
+ store i32 %tmp449, ptr %tmp446, align 4
%tmp450 = add nuw nsw i64 %tmp442, 1
- %tmp451 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 %tmp436, i64 %tmp450
- %tmp452 = load i32, i32* %tmp451, align 4
+ %tmp451 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 %tmp436, i64 %tmp450
+ %tmp452 = load i32, ptr %tmp451, align 4
%tmp453 = mul i32 %tmp452, 91
%tmp454 = icmp eq i32 %tmp453, -30
br i1 %tmp454, label %bb455, label %bb473
bb455: ; preds = %bb441
- %tmp456 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp442
- %tmp457 = load i32, i32* %tmp456, align 4
+ %tmp456 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp442
+ %tmp457 = load i32, ptr %tmp456, align 4
%tmp458 = icmp ugt i32 %tmp457, %tmp402
br i1 %tmp458, label %bb459, label %bb473
bb459: ; preds = %bb455
- %tmp460 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 %tmp445, i64 %tmp436
- store i32 %tmp387, i32* %tmp460, align 4
- %tmp461 = load i32, i32* %tmp57, align 4
- %tmp462 = load i32, i32* %tmp438, align 4
+ %tmp460 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 %tmp445, i64 %tmp436
+ store i32 %tmp387, ptr %tmp460, align 4
+ %tmp461 = load i32, ptr %tmp57, align 4
+ %tmp462 = load i32, ptr %tmp438, align 4
%tmp463 = add i32 %tmp462, %tmp461
- %tmp464 = load i32, i32* %tmp439, align 4
+ %tmp464 = load i32, ptr %tmp439, align 4
%tmp465 = add i32 %tmp464, 68
%tmp466 = icmp eq i32 %tmp463, %tmp465
br i1 %tmp466, label %bb471, label %bb467
bb467: ; preds = %bb459
- %tmp468 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp450
- %tmp469 = load i32, i32* %tmp468, align 4
- %tmp470 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp445
- store i32 %tmp469, i32* %tmp470, align 4
+ %tmp468 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp450
+ %tmp469 = load i32, ptr %tmp468, align 4
+ %tmp470 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp445
+ store i32 %tmp469, ptr %tmp470, align 4
br label %bb473
bb471: ; preds = %bb459
- %tmp472 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 %tmp437, i64 %tmp445
- store i32 %tmp440, i32* %tmp472, align 4
+ %tmp472 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 %tmp437, i64 %tmp445
+ store i32 %tmp440, ptr %tmp472, align 4
br label %bb473
bb473: ; preds = %bb471, %bb467, %bb455, %bb441
%tmp481 = phi i32 [ 0, %bb478 ], [ %tmp520, %bb479 ]
%tmp482 = and i64 %tmp480, 1
%tmp483 = icmp eq i64 %tmp482, 0
- %tmp484 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp480
- %tmp485 = load i32, i32* %tmp484, align 4
+ %tmp484 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp480
+ %tmp485 = load i32, ptr %tmp484, align 4
%tmp486 = sub i32 0, %tmp485
%tmp487 = select i1 %tmp483, i32 %tmp485, i32 %tmp486
%tmp488 = add i32 %tmp487, %tmp481
%tmp489 = add nuw nsw i64 %tmp480, 1
%tmp490 = and i64 %tmp489, 1
%tmp491 = icmp eq i64 %tmp490, 0
- %tmp492 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp489
- %tmp493 = load i32, i32* %tmp492, align 4
+ %tmp492 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp489
+ %tmp493 = load i32, ptr %tmp492, align 4
%tmp494 = sub i32 0, %tmp493
%tmp495 = select i1 %tmp491, i32 %tmp493, i32 %tmp494
%tmp496 = add i32 %tmp495, %tmp488
%tmp497 = add nuw nsw i64 %tmp480, 2
%tmp498 = and i64 %tmp497, 1
%tmp499 = icmp eq i64 %tmp498, 0
- %tmp500 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp497
- %tmp501 = load i32, i32* %tmp500, align 4
+ %tmp500 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp497
+ %tmp501 = load i32, ptr %tmp500, align 4
%tmp502 = sub i32 0, %tmp501
%tmp503 = select i1 %tmp499, i32 %tmp501, i32 %tmp502
%tmp504 = add i32 %tmp503, %tmp496
%tmp505 = add nuw nsw i64 %tmp480, 3
%tmp506 = and i64 %tmp505, 1
%tmp507 = icmp eq i64 %tmp506, 0
- %tmp508 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp505
- %tmp509 = load i32, i32* %tmp508, align 4
+ %tmp508 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp505
+ %tmp509 = load i32, ptr %tmp508, align 4
%tmp510 = sub i32 0, %tmp509
%tmp511 = select i1 %tmp507, i32 %tmp509, i32 %tmp510
%tmp512 = add i32 %tmp511, %tmp504
%tmp513 = add nuw nsw i64 %tmp480, 4
%tmp514 = and i64 %tmp513, 1
%tmp515 = icmp eq i64 %tmp514, 0
- %tmp516 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp, i64 0, i64 %tmp513
- %tmp517 = load i32, i32* %tmp516, align 4
+ %tmp516 = getelementptr inbounds [100 x i32], ptr %tmp, i64 0, i64 %tmp513
+ %tmp517 = load i32, ptr %tmp516, align 4
%tmp518 = sub i32 0, %tmp517
%tmp519 = select i1 %tmp515, i32 %tmp517, i32 %tmp518
%tmp520 = add i32 %tmp519, %tmp512
%tmp525 = phi i32 [ %tmp564, %bb523 ], [ 0, %bb479 ]
%tmp526 = and i64 %tmp524, 1
%tmp527 = icmp eq i64 %tmp526, 0
- %tmp528 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp524
- %tmp529 = load i32, i32* %tmp528, align 4
+ %tmp528 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp524
+ %tmp529 = load i32, ptr %tmp528, align 4
%tmp530 = sub i32 0, %tmp529
%tmp531 = select i1 %tmp527, i32 %tmp529, i32 %tmp530
%tmp532 = add i32 %tmp531, %tmp525
%tmp533 = add nuw nsw i64 %tmp524, 1
%tmp534 = and i64 %tmp533, 1
%tmp535 = icmp eq i64 %tmp534, 0
- %tmp536 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp533
- %tmp537 = load i32, i32* %tmp536, align 4
+ %tmp536 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp533
+ %tmp537 = load i32, ptr %tmp536, align 4
%tmp538 = sub i32 0, %tmp537
%tmp539 = select i1 %tmp535, i32 %tmp537, i32 %tmp538
%tmp540 = add i32 %tmp539, %tmp532
%tmp541 = add nuw nsw i64 %tmp524, 2
%tmp542 = and i64 %tmp541, 1
%tmp543 = icmp eq i64 %tmp542, 0
- %tmp544 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp541
- %tmp545 = load i32, i32* %tmp544, align 4
+ %tmp544 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp541
+ %tmp545 = load i32, ptr %tmp544, align 4
%tmp546 = sub i32 0, %tmp545
%tmp547 = select i1 %tmp543, i32 %tmp545, i32 %tmp546
%tmp548 = add i32 %tmp547, %tmp540
%tmp549 = add nuw nsw i64 %tmp524, 3
%tmp550 = and i64 %tmp549, 1
%tmp551 = icmp eq i64 %tmp550, 0
- %tmp552 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp549
- %tmp553 = load i32, i32* %tmp552, align 4
+ %tmp552 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp549
+ %tmp553 = load i32, ptr %tmp552, align 4
%tmp554 = sub i32 0, %tmp553
%tmp555 = select i1 %tmp551, i32 %tmp553, i32 %tmp554
%tmp556 = add i32 %tmp555, %tmp548
%tmp557 = add nuw nsw i64 %tmp524, 4
%tmp558 = and i64 %tmp557, 1
%tmp559 = icmp eq i64 %tmp558, 0
- %tmp560 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp7, i64 0, i64 %tmp557
- %tmp561 = load i32, i32* %tmp560, align 4
+ %tmp560 = getelementptr inbounds [100 x i32], ptr %tmp7, i64 0, i64 %tmp557
+ %tmp561 = load i32, ptr %tmp560, align 4
%tmp562 = sub i32 0, %tmp561
%tmp563 = select i1 %tmp559, i32 %tmp561, i32 %tmp562
%tmp564 = add i32 %tmp563, %tmp556
%tmp569 = phi i32 [ %tmp608, %bb567 ], [ 0, %bb523 ]
%tmp570 = and i64 %tmp568, 1
%tmp571 = icmp eq i64 %tmp570, 0
- %tmp572 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp568
- %tmp573 = load i32, i32* %tmp572, align 4
+ %tmp572 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp568
+ %tmp573 = load i32, ptr %tmp572, align 4
%tmp574 = sub i32 0, %tmp573
%tmp575 = select i1 %tmp571, i32 %tmp573, i32 %tmp574
%tmp576 = add i32 %tmp575, %tmp569
%tmp577 = add nuw nsw i64 %tmp568, 1
%tmp578 = and i64 %tmp577, 1
%tmp579 = icmp eq i64 %tmp578, 0
- %tmp580 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp577
- %tmp581 = load i32, i32* %tmp580, align 4
+ %tmp580 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp577
+ %tmp581 = load i32, ptr %tmp580, align 4
%tmp582 = sub i32 0, %tmp581
%tmp583 = select i1 %tmp579, i32 %tmp581, i32 %tmp582
%tmp584 = add i32 %tmp583, %tmp576
%tmp585 = add nuw nsw i64 %tmp568, 2
%tmp586 = and i64 %tmp585, 1
%tmp587 = icmp eq i64 %tmp586, 0
- %tmp588 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp585
- %tmp589 = load i32, i32* %tmp588, align 4
+ %tmp588 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp585
+ %tmp589 = load i32, ptr %tmp588, align 4
%tmp590 = sub i32 0, %tmp589
%tmp591 = select i1 %tmp587, i32 %tmp589, i32 %tmp590
%tmp592 = add i32 %tmp591, %tmp584
%tmp593 = add nuw nsw i64 %tmp568, 3
%tmp594 = and i64 %tmp593, 1
%tmp595 = icmp eq i64 %tmp594, 0
- %tmp596 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp593
- %tmp597 = load i32, i32* %tmp596, align 4
+ %tmp596 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp593
+ %tmp597 = load i32, ptr %tmp596, align 4
%tmp598 = sub i32 0, %tmp597
%tmp599 = select i1 %tmp595, i32 %tmp597, i32 %tmp598
%tmp600 = add i32 %tmp599, %tmp592
%tmp601 = add nuw nsw i64 %tmp568, 4
%tmp602 = and i64 %tmp601, 1
%tmp603 = icmp eq i64 %tmp602, 0
- %tmp604 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp8, i64 0, i64 %tmp601
- %tmp605 = load i32, i32* %tmp604, align 4
+ %tmp604 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 %tmp601
+ %tmp605 = load i32, ptr %tmp604, align 4
%tmp606 = sub i32 0, %tmp605
%tmp607 = select i1 %tmp603, i32 %tmp605, i32 %tmp606
%tmp608 = add i32 %tmp607, %tmp600
%tmp613 = phi i32 [ %tmp652, %bb611 ], [ 0, %bb567 ]
%tmp614 = and i64 %tmp612, 1
%tmp615 = icmp eq i64 %tmp614, 0
- %tmp616 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp612
- %tmp617 = load i32, i32* %tmp616, align 4
+ %tmp616 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp612
+ %tmp617 = load i32, ptr %tmp616, align 4
%tmp618 = sub i32 0, %tmp617
%tmp619 = select i1 %tmp615, i32 %tmp617, i32 %tmp618
%tmp620 = add i32 %tmp619, %tmp613
%tmp621 = add nuw nsw i64 %tmp612, 1
%tmp622 = and i64 %tmp621, 1
%tmp623 = icmp eq i64 %tmp622, 0
- %tmp624 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp621
- %tmp625 = load i32, i32* %tmp624, align 4
+ %tmp624 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp621
+ %tmp625 = load i32, ptr %tmp624, align 4
%tmp626 = sub i32 0, %tmp625
%tmp627 = select i1 %tmp623, i32 %tmp625, i32 %tmp626
%tmp628 = add i32 %tmp627, %tmp620
%tmp629 = add nuw nsw i64 %tmp612, 2
%tmp630 = and i64 %tmp629, 1
%tmp631 = icmp eq i64 %tmp630, 0
- %tmp632 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp629
- %tmp633 = load i32, i32* %tmp632, align 4
+ %tmp632 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp629
+ %tmp633 = load i32, ptr %tmp632, align 4
%tmp634 = sub i32 0, %tmp633
%tmp635 = select i1 %tmp631, i32 %tmp633, i32 %tmp634
%tmp636 = add i32 %tmp635, %tmp628
%tmp637 = add nuw nsw i64 %tmp612, 3
%tmp638 = and i64 %tmp637, 1
%tmp639 = icmp eq i64 %tmp638, 0
- %tmp640 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp637
- %tmp641 = load i32, i32* %tmp640, align 4
+ %tmp640 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp637
+ %tmp641 = load i32, ptr %tmp640, align 4
%tmp642 = sub i32 0, %tmp641
%tmp643 = select i1 %tmp639, i32 %tmp641, i32 %tmp642
%tmp644 = add i32 %tmp643, %tmp636
%tmp645 = add nuw nsw i64 %tmp612, 4
%tmp646 = and i64 %tmp645, 1
%tmp647 = icmp eq i64 %tmp646, 0
- %tmp648 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp9, i64 0, i64 0, i64 %tmp645
- %tmp649 = load i32, i32* %tmp648, align 4
+ %tmp648 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp9, i64 0, i64 0, i64 %tmp645
+ %tmp649 = load i32, ptr %tmp648, align 4
%tmp650 = sub i32 0, %tmp649
%tmp651 = select i1 %tmp647, i32 %tmp649, i32 %tmp650
%tmp652 = add i32 %tmp651, %tmp644
%tmp657 = phi i32 [ %tmp696, %bb655 ], [ 0, %bb611 ]
%tmp658 = and i64 %tmp656, 1
%tmp659 = icmp eq i64 %tmp658, 0
- %tmp660 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp656
- %tmp661 = load i32, i32* %tmp660, align 4
+ %tmp660 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp656
+ %tmp661 = load i32, ptr %tmp660, align 4
%tmp662 = sub i32 0, %tmp661
%tmp663 = select i1 %tmp659, i32 %tmp661, i32 %tmp662
%tmp664 = add i32 %tmp663, %tmp657
%tmp665 = add nuw nsw i64 %tmp656, 1
%tmp666 = and i64 %tmp665, 1
%tmp667 = icmp eq i64 %tmp666, 0
- %tmp668 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp665
- %tmp669 = load i32, i32* %tmp668, align 4
+ %tmp668 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp665
+ %tmp669 = load i32, ptr %tmp668, align 4
%tmp670 = sub i32 0, %tmp669
%tmp671 = select i1 %tmp667, i32 %tmp669, i32 %tmp670
%tmp672 = add i32 %tmp671, %tmp664
%tmp673 = add nuw nsw i64 %tmp656, 2
%tmp674 = and i64 %tmp673, 1
%tmp675 = icmp eq i64 %tmp674, 0
- %tmp676 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp673
- %tmp677 = load i32, i32* %tmp676, align 4
+ %tmp676 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp673
+ %tmp677 = load i32, ptr %tmp676, align 4
%tmp678 = sub i32 0, %tmp677
%tmp679 = select i1 %tmp675, i32 %tmp677, i32 %tmp678
%tmp680 = add i32 %tmp679, %tmp672
%tmp681 = add nuw nsw i64 %tmp656, 3
%tmp682 = and i64 %tmp681, 1
%tmp683 = icmp eq i64 %tmp682, 0
- %tmp684 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp681
- %tmp685 = load i32, i32* %tmp684, align 4
+ %tmp684 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp681
+ %tmp685 = load i32, ptr %tmp684, align 4
%tmp686 = sub i32 0, %tmp685
%tmp687 = select i1 %tmp683, i32 %tmp685, i32 %tmp686
%tmp688 = add i32 %tmp687, %tmp680
%tmp689 = add nuw nsw i64 %tmp656, 4
%tmp690 = and i64 %tmp689, 1
%tmp691 = icmp eq i64 %tmp690, 0
- %tmp692 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp10, i64 0, i64 %tmp689
- %tmp693 = load i32, i32* %tmp692, align 4
+ %tmp692 = getelementptr inbounds [100 x i32], ptr %tmp10, i64 0, i64 %tmp689
+ %tmp693 = load i32, ptr %tmp692, align 4
%tmp694 = sub i32 0, %tmp693
%tmp695 = select i1 %tmp691, i32 %tmp693, i32 %tmp694
%tmp696 = add i32 %tmp695, %tmp688
%tmp701 = phi i32 [ %tmp740, %bb699 ], [ 0, %bb655 ]
%tmp702 = and i64 %tmp700, 1
%tmp703 = icmp eq i64 %tmp702, 0
- %tmp704 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp700
- %tmp705 = load i32, i32* %tmp704, align 4
+ %tmp704 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp700
+ %tmp705 = load i32, ptr %tmp704, align 4
%tmp706 = sub i32 0, %tmp705
%tmp707 = select i1 %tmp703, i32 %tmp705, i32 %tmp706
%tmp708 = add i32 %tmp707, %tmp701
%tmp709 = add nuw nsw i64 %tmp700, 1
%tmp710 = and i64 %tmp709, 1
%tmp711 = icmp eq i64 %tmp710, 0
- %tmp712 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp709
- %tmp713 = load i32, i32* %tmp712, align 4
+ %tmp712 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp709
+ %tmp713 = load i32, ptr %tmp712, align 4
%tmp714 = sub i32 0, %tmp713
%tmp715 = select i1 %tmp711, i32 %tmp713, i32 %tmp714
%tmp716 = add i32 %tmp715, %tmp708
%tmp717 = add nuw nsw i64 %tmp700, 2
%tmp718 = and i64 %tmp717, 1
%tmp719 = icmp eq i64 %tmp718, 0
- %tmp720 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp717
- %tmp721 = load i32, i32* %tmp720, align 4
+ %tmp720 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp717
+ %tmp721 = load i32, ptr %tmp720, align 4
%tmp722 = sub i32 0, %tmp721
%tmp723 = select i1 %tmp719, i32 %tmp721, i32 %tmp722
%tmp724 = add i32 %tmp723, %tmp716
%tmp725 = add nuw nsw i64 %tmp700, 3
%tmp726 = and i64 %tmp725, 1
%tmp727 = icmp eq i64 %tmp726, 0
- %tmp728 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp725
- %tmp729 = load i32, i32* %tmp728, align 4
+ %tmp728 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp725
+ %tmp729 = load i32, ptr %tmp728, align 4
%tmp730 = sub i32 0, %tmp729
%tmp731 = select i1 %tmp727, i32 %tmp729, i32 %tmp730
%tmp732 = add i32 %tmp731, %tmp724
%tmp733 = add nuw nsw i64 %tmp700, 4
%tmp734 = and i64 %tmp733, 1
%tmp735 = icmp eq i64 %tmp734, 0
- %tmp736 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp11, i64 0, i64 0, i64 %tmp733
- %tmp737 = load i32, i32* %tmp736, align 4
+ %tmp736 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp11, i64 0, i64 0, i64 %tmp733
+ %tmp737 = load i32, ptr %tmp736, align 4
%tmp738 = sub i32 0, %tmp737
%tmp739 = select i1 %tmp735, i32 %tmp737, i32 %tmp738
%tmp740 = add i32 %tmp739, %tmp732
%tmp745 = phi i32 [ %tmp784, %bb743 ], [ 0, %bb699 ]
%tmp746 = and i64 %tmp744, 1
%tmp747 = icmp eq i64 %tmp746, 0
- %tmp748 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp744
- %tmp749 = load i32, i32* %tmp748, align 4
+ %tmp748 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp744
+ %tmp749 = load i32, ptr %tmp748, align 4
%tmp750 = sub i32 0, %tmp749
%tmp751 = select i1 %tmp747, i32 %tmp749, i32 %tmp750
%tmp752 = add i32 %tmp751, %tmp745
%tmp753 = add nuw nsw i64 %tmp744, 1
%tmp754 = and i64 %tmp753, 1
%tmp755 = icmp eq i64 %tmp754, 0
- %tmp756 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp753
- %tmp757 = load i32, i32* %tmp756, align 4
+ %tmp756 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp753
+ %tmp757 = load i32, ptr %tmp756, align 4
%tmp758 = sub i32 0, %tmp757
%tmp759 = select i1 %tmp755, i32 %tmp757, i32 %tmp758
%tmp760 = add i32 %tmp759, %tmp752
%tmp761 = add nuw nsw i64 %tmp744, 2
%tmp762 = and i64 %tmp761, 1
%tmp763 = icmp eq i64 %tmp762, 0
- %tmp764 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp761
- %tmp765 = load i32, i32* %tmp764, align 4
+ %tmp764 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp761
+ %tmp765 = load i32, ptr %tmp764, align 4
%tmp766 = sub i32 0, %tmp765
%tmp767 = select i1 %tmp763, i32 %tmp765, i32 %tmp766
%tmp768 = add i32 %tmp767, %tmp760
%tmp769 = add nuw nsw i64 %tmp744, 3
%tmp770 = and i64 %tmp769, 1
%tmp771 = icmp eq i64 %tmp770, 0
- %tmp772 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp769
- %tmp773 = load i32, i32* %tmp772, align 4
+ %tmp772 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp769
+ %tmp773 = load i32, ptr %tmp772, align 4
%tmp774 = sub i32 0, %tmp773
%tmp775 = select i1 %tmp771, i32 %tmp773, i32 %tmp774
%tmp776 = add i32 %tmp775, %tmp768
%tmp777 = add nuw nsw i64 %tmp744, 4
%tmp778 = and i64 %tmp777, 1
%tmp779 = icmp eq i64 %tmp778, 0
- %tmp780 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp12, i64 0, i64 %tmp777
- %tmp781 = load i32, i32* %tmp780, align 4
+ %tmp780 = getelementptr inbounds [100 x i32], ptr %tmp12, i64 0, i64 %tmp777
+ %tmp781 = load i32, ptr %tmp780, align 4
%tmp782 = sub i32 0, %tmp781
%tmp783 = select i1 %tmp779, i32 %tmp781, i32 %tmp782
%tmp784 = add i32 %tmp783, %tmp776
%tmp789 = phi i32 [ %tmp828, %bb787 ], [ 0, %bb743 ]
%tmp790 = and i64 %tmp788, 1
%tmp791 = icmp eq i64 %tmp790, 0
- %tmp792 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp788
- %tmp793 = load i32, i32* %tmp792, align 4
+ %tmp792 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp788
+ %tmp793 = load i32, ptr %tmp792, align 4
%tmp794 = sub i32 0, %tmp793
%tmp795 = select i1 %tmp791, i32 %tmp793, i32 %tmp794
%tmp796 = add i32 %tmp795, %tmp789
%tmp797 = add nuw nsw i64 %tmp788, 1
%tmp798 = and i64 %tmp797, 1
%tmp799 = icmp eq i64 %tmp798, 0
- %tmp800 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp797
- %tmp801 = load i32, i32* %tmp800, align 4
+ %tmp800 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp797
+ %tmp801 = load i32, ptr %tmp800, align 4
%tmp802 = sub i32 0, %tmp801
%tmp803 = select i1 %tmp799, i32 %tmp801, i32 %tmp802
%tmp804 = add i32 %tmp803, %tmp796
%tmp805 = add nuw nsw i64 %tmp788, 2
%tmp806 = and i64 %tmp805, 1
%tmp807 = icmp eq i64 %tmp806, 0
- %tmp808 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp805
- %tmp809 = load i32, i32* %tmp808, align 4
+ %tmp808 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp805
+ %tmp809 = load i32, ptr %tmp808, align 4
%tmp810 = sub i32 0, %tmp809
%tmp811 = select i1 %tmp807, i32 %tmp809, i32 %tmp810
%tmp812 = add i32 %tmp811, %tmp804
%tmp813 = add nuw nsw i64 %tmp788, 3
%tmp814 = and i64 %tmp813, 1
%tmp815 = icmp eq i64 %tmp814, 0
- %tmp816 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp813
- %tmp817 = load i32, i32* %tmp816, align 4
+ %tmp816 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp813
+ %tmp817 = load i32, ptr %tmp816, align 4
%tmp818 = sub i32 0, %tmp817
%tmp819 = select i1 %tmp815, i32 %tmp817, i32 %tmp818
%tmp820 = add i32 %tmp819, %tmp812
%tmp821 = add nuw nsw i64 %tmp788, 4
%tmp822 = and i64 %tmp821, 1
%tmp823 = icmp eq i64 %tmp822, 0
- %tmp824 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp13, i64 0, i64 %tmp821
- %tmp825 = load i32, i32* %tmp824, align 4
+ %tmp824 = getelementptr inbounds [100 x i32], ptr %tmp13, i64 0, i64 %tmp821
+ %tmp825 = load i32, ptr %tmp824, align 4
%tmp826 = sub i32 0, %tmp825
%tmp827 = select i1 %tmp823, i32 %tmp825, i32 %tmp826
%tmp828 = add i32 %tmp827, %tmp820
%tmp833 = phi i32 [ %tmp872, %bb831 ], [ 0, %bb787 ]
%tmp834 = and i64 %tmp832, 1
%tmp835 = icmp eq i64 %tmp834, 0
- %tmp836 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp832
- %tmp837 = load i32, i32* %tmp836, align 4
+ %tmp836 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp832
+ %tmp837 = load i32, ptr %tmp836, align 4
%tmp838 = sub i32 0, %tmp837
%tmp839 = select i1 %tmp835, i32 %tmp837, i32 %tmp838
%tmp840 = add i32 %tmp839, %tmp833
%tmp841 = add nuw nsw i64 %tmp832, 1
%tmp842 = and i64 %tmp841, 1
%tmp843 = icmp eq i64 %tmp842, 0
- %tmp844 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp841
- %tmp845 = load i32, i32* %tmp844, align 4
+ %tmp844 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp841
+ %tmp845 = load i32, ptr %tmp844, align 4
%tmp846 = sub i32 0, %tmp845
%tmp847 = select i1 %tmp843, i32 %tmp845, i32 %tmp846
%tmp848 = add i32 %tmp847, %tmp840
%tmp849 = add nuw nsw i64 %tmp832, 2
%tmp850 = and i64 %tmp849, 1
%tmp851 = icmp eq i64 %tmp850, 0
- %tmp852 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp849
- %tmp853 = load i32, i32* %tmp852, align 4
+ %tmp852 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp849
+ %tmp853 = load i32, ptr %tmp852, align 4
%tmp854 = sub i32 0, %tmp853
%tmp855 = select i1 %tmp851, i32 %tmp853, i32 %tmp854
%tmp856 = add i32 %tmp855, %tmp848
%tmp857 = add nuw nsw i64 %tmp832, 3
%tmp858 = and i64 %tmp857, 1
%tmp859 = icmp eq i64 %tmp858, 0
- %tmp860 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp857
- %tmp861 = load i32, i32* %tmp860, align 4
+ %tmp860 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp857
+ %tmp861 = load i32, ptr %tmp860, align 4
%tmp862 = sub i32 0, %tmp861
%tmp863 = select i1 %tmp859, i32 %tmp861, i32 %tmp862
%tmp864 = add i32 %tmp863, %tmp856
%tmp865 = add nuw nsw i64 %tmp832, 4
%tmp866 = and i64 %tmp865, 1
%tmp867 = icmp eq i64 %tmp866, 0
- %tmp868 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp14, i64 0, i64 0, i64 %tmp865
- %tmp869 = load i32, i32* %tmp868, align 4
+ %tmp868 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp14, i64 0, i64 0, i64 %tmp865
+ %tmp869 = load i32, ptr %tmp868, align 4
%tmp870 = sub i32 0, %tmp869
%tmp871 = select i1 %tmp867, i32 %tmp869, i32 %tmp870
%tmp872 = add i32 %tmp871, %tmp864
%tmp877 = phi i32 [ %tmp916, %bb875 ], [ 0, %bb831 ]
%tmp878 = and i64 %tmp876, 1
%tmp879 = icmp eq i64 %tmp878, 0
- %tmp880 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp876
- %tmp881 = load i32, i32* %tmp880, align 4
+ %tmp880 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp876
+ %tmp881 = load i32, ptr %tmp880, align 4
%tmp882 = sub i32 0, %tmp881
%tmp883 = select i1 %tmp879, i32 %tmp881, i32 %tmp882
%tmp884 = add i32 %tmp883, %tmp877
%tmp885 = add nuw nsw i64 %tmp876, 1
%tmp886 = and i64 %tmp885, 1
%tmp887 = icmp eq i64 %tmp886, 0
- %tmp888 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp885
- %tmp889 = load i32, i32* %tmp888, align 4
+ %tmp888 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp885
+ %tmp889 = load i32, ptr %tmp888, align 4
%tmp890 = sub i32 0, %tmp889
%tmp891 = select i1 %tmp887, i32 %tmp889, i32 %tmp890
%tmp892 = add i32 %tmp891, %tmp884
%tmp893 = add nuw nsw i64 %tmp876, 2
%tmp894 = and i64 %tmp893, 1
%tmp895 = icmp eq i64 %tmp894, 0
- %tmp896 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp893
- %tmp897 = load i32, i32* %tmp896, align 4
+ %tmp896 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp893
+ %tmp897 = load i32, ptr %tmp896, align 4
%tmp898 = sub i32 0, %tmp897
%tmp899 = select i1 %tmp895, i32 %tmp897, i32 %tmp898
%tmp900 = add i32 %tmp899, %tmp892
%tmp901 = add nuw nsw i64 %tmp876, 3
%tmp902 = and i64 %tmp901, 1
%tmp903 = icmp eq i64 %tmp902, 0
- %tmp904 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp901
- %tmp905 = load i32, i32* %tmp904, align 4
+ %tmp904 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp901
+ %tmp905 = load i32, ptr %tmp904, align 4
%tmp906 = sub i32 0, %tmp905
%tmp907 = select i1 %tmp903, i32 %tmp905, i32 %tmp906
%tmp908 = add i32 %tmp907, %tmp900
%tmp909 = add nuw nsw i64 %tmp876, 4
%tmp910 = and i64 %tmp909, 1
%tmp911 = icmp eq i64 %tmp910, 0
- %tmp912 = getelementptr inbounds [100 x i32], [100 x i32]* %tmp15, i64 0, i64 %tmp909
- %tmp913 = load i32, i32* %tmp912, align 4
+ %tmp912 = getelementptr inbounds [100 x i32], ptr %tmp15, i64 0, i64 %tmp909
+ %tmp913 = load i32, ptr %tmp912, align 4
%tmp914 = sub i32 0, %tmp913
%tmp915 = select i1 %tmp911, i32 %tmp913, i32 %tmp914
%tmp916 = add i32 %tmp915, %tmp908
%tmp921 = phi i32 [ %tmp960, %bb919 ], [ 0, %bb875 ]
%tmp922 = and i64 %tmp920, 1
%tmp923 = icmp eq i64 %tmp922, 0
- %tmp924 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp920
- %tmp925 = load i32, i32* %tmp924, align 4
+ %tmp924 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp920
+ %tmp925 = load i32, ptr %tmp924, align 4
%tmp926 = sub i32 0, %tmp925
%tmp927 = select i1 %tmp923, i32 %tmp925, i32 %tmp926
%tmp928 = add i32 %tmp927, %tmp921
%tmp929 = add nuw nsw i64 %tmp920, 1
%tmp930 = and i64 %tmp929, 1
%tmp931 = icmp eq i64 %tmp930, 0
- %tmp932 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp929
- %tmp933 = load i32, i32* %tmp932, align 4
+ %tmp932 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp929
+ %tmp933 = load i32, ptr %tmp932, align 4
%tmp934 = sub i32 0, %tmp933
%tmp935 = select i1 %tmp931, i32 %tmp933, i32 %tmp934
%tmp936 = add i32 %tmp935, %tmp928
%tmp937 = add nuw nsw i64 %tmp920, 2
%tmp938 = and i64 %tmp937, 1
%tmp939 = icmp eq i64 %tmp938, 0
- %tmp940 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp937
- %tmp941 = load i32, i32* %tmp940, align 4
+ %tmp940 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp937
+ %tmp941 = load i32, ptr %tmp940, align 4
%tmp942 = sub i32 0, %tmp941
%tmp943 = select i1 %tmp939, i32 %tmp941, i32 %tmp942
%tmp944 = add i32 %tmp943, %tmp936
%tmp945 = add nuw nsw i64 %tmp920, 3
%tmp946 = and i64 %tmp945, 1
%tmp947 = icmp eq i64 %tmp946, 0
- %tmp948 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp945
- %tmp949 = load i32, i32* %tmp948, align 4
+ %tmp948 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp945
+ %tmp949 = load i32, ptr %tmp948, align 4
%tmp950 = sub i32 0, %tmp949
%tmp951 = select i1 %tmp947, i32 %tmp949, i32 %tmp950
%tmp952 = add i32 %tmp951, %tmp944
%tmp953 = add nuw nsw i64 %tmp920, 4
%tmp954 = and i64 %tmp953, 1
%tmp955 = icmp eq i64 %tmp954, 0
- %tmp956 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp16, i64 0, i64 0, i64 %tmp953
- %tmp957 = load i32, i32* %tmp956, align 4
+ %tmp956 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp16, i64 0, i64 0, i64 %tmp953
+ %tmp957 = load i32, ptr %tmp956, align 4
%tmp958 = sub i32 0, %tmp957
%tmp959 = select i1 %tmp955, i32 %tmp957, i32 %tmp958
%tmp960 = add i32 %tmp959, %tmp952
%tmp965 = phi i32 [ %tmp1004, %bb963 ], [ 0, %bb919 ]
%tmp966 = and i64 %tmp964, 1
%tmp967 = icmp eq i64 %tmp966, 0
- %tmp968 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp964
- %tmp969 = load i32, i32* %tmp968, align 4
+ %tmp968 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp964
+ %tmp969 = load i32, ptr %tmp968, align 4
%tmp970 = sub i32 0, %tmp969
%tmp971 = select i1 %tmp967, i32 %tmp969, i32 %tmp970
%tmp972 = add i32 %tmp971, %tmp965
%tmp973 = add nuw nsw i64 %tmp964, 1
%tmp974 = and i64 %tmp973, 1
%tmp975 = icmp eq i64 %tmp974, 0
- %tmp976 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp973
- %tmp977 = load i32, i32* %tmp976, align 4
+ %tmp976 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp973
+ %tmp977 = load i32, ptr %tmp976, align 4
%tmp978 = sub i32 0, %tmp977
%tmp979 = select i1 %tmp975, i32 %tmp977, i32 %tmp978
%tmp980 = add i32 %tmp979, %tmp972
%tmp981 = add nuw nsw i64 %tmp964, 2
%tmp982 = and i64 %tmp981, 1
%tmp983 = icmp eq i64 %tmp982, 0
- %tmp984 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp981
- %tmp985 = load i32, i32* %tmp984, align 4
+ %tmp984 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp981
+ %tmp985 = load i32, ptr %tmp984, align 4
%tmp986 = sub i32 0, %tmp985
%tmp987 = select i1 %tmp983, i32 %tmp985, i32 %tmp986
%tmp988 = add i32 %tmp987, %tmp980
%tmp989 = add nuw nsw i64 %tmp964, 3
%tmp990 = and i64 %tmp989, 1
%tmp991 = icmp eq i64 %tmp990, 0
- %tmp992 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp989
- %tmp993 = load i32, i32* %tmp992, align 4
+ %tmp992 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp989
+ %tmp993 = load i32, ptr %tmp992, align 4
%tmp994 = sub i32 0, %tmp993
%tmp995 = select i1 %tmp991, i32 %tmp993, i32 %tmp994
%tmp996 = add i32 %tmp995, %tmp988
%tmp997 = add nuw nsw i64 %tmp964, 4
%tmp998 = and i64 %tmp997, 1
%tmp999 = icmp eq i64 %tmp998, 0
- %tmp1000 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp17, i64 0, i64 0, i64 %tmp997
- %tmp1001 = load i32, i32* %tmp1000, align 4
+ %tmp1000 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp17, i64 0, i64 0, i64 %tmp997
+ %tmp1001 = load i32, ptr %tmp1000, align 4
%tmp1002 = sub i32 0, %tmp1001
%tmp1003 = select i1 %tmp999, i32 %tmp1001, i32 %tmp1002
%tmp1004 = add i32 %tmp1003, %tmp996
%tmp1009 = phi i32 [ %tmp1048, %bb1007 ], [ 0, %bb963 ]
%tmp1010 = and i64 %tmp1008, 1
%tmp1011 = icmp eq i64 %tmp1010, 0
- %tmp1012 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp1008
- %tmp1013 = load i32, i32* %tmp1012, align 4
+ %tmp1012 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp1008
+ %tmp1013 = load i32, ptr %tmp1012, align 4
%tmp1014 = sub i32 0, %tmp1013
%tmp1015 = select i1 %tmp1011, i32 %tmp1013, i32 %tmp1014
%tmp1016 = add i32 %tmp1015, %tmp1009
%tmp1017 = add nuw nsw i64 %tmp1008, 1
%tmp1018 = and i64 %tmp1017, 1
%tmp1019 = icmp eq i64 %tmp1018, 0
- %tmp1020 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp1017
- %tmp1021 = load i32, i32* %tmp1020, align 4
+ %tmp1020 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp1017
+ %tmp1021 = load i32, ptr %tmp1020, align 4
%tmp1022 = sub i32 0, %tmp1021
%tmp1023 = select i1 %tmp1019, i32 %tmp1021, i32 %tmp1022
%tmp1024 = add i32 %tmp1023, %tmp1016
%tmp1025 = add nuw nsw i64 %tmp1008, 2
%tmp1026 = and i64 %tmp1025, 1
%tmp1027 = icmp eq i64 %tmp1026, 0
- %tmp1028 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp1025
- %tmp1029 = load i32, i32* %tmp1028, align 4
+ %tmp1028 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp1025
+ %tmp1029 = load i32, ptr %tmp1028, align 4
%tmp1030 = sub i32 0, %tmp1029
%tmp1031 = select i1 %tmp1027, i32 %tmp1029, i32 %tmp1030
%tmp1032 = add i32 %tmp1031, %tmp1024
%tmp1033 = add nuw nsw i64 %tmp1008, 3
%tmp1034 = and i64 %tmp1033, 1
%tmp1035 = icmp eq i64 %tmp1034, 0
- %tmp1036 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp1033
- %tmp1037 = load i32, i32* %tmp1036, align 4
+ %tmp1036 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp1033
+ %tmp1037 = load i32, ptr %tmp1036, align 4
%tmp1038 = sub i32 0, %tmp1037
%tmp1039 = select i1 %tmp1035, i32 %tmp1037, i32 %tmp1038
%tmp1040 = add i32 %tmp1039, %tmp1032
%tmp1041 = add nuw nsw i64 %tmp1008, 4
%tmp1042 = and i64 %tmp1041, 1
%tmp1043 = icmp eq i64 %tmp1042, 0
- %tmp1044 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %tmp18, i64 0, i64 0, i64 %tmp1041
- %tmp1045 = load i32, i32* %tmp1044, align 4
+ %tmp1044 = getelementptr inbounds [100 x [100 x i32]], ptr %tmp18, i64 0, i64 0, i64 %tmp1041
+ %tmp1045 = load i32, ptr %tmp1044, align 4
%tmp1046 = sub i32 0, %tmp1045
%tmp1047 = select i1 %tmp1043, i32 %tmp1045, i32 %tmp1046
%tmp1048 = add i32 %tmp1047, %tmp1040
%tmp1063 = sub i32 %tmp1062, %tmp960
%tmp1064 = add i32 %tmp1063, %tmp1004
%tmp1065 = sub i32 %tmp1064, %tmp1048
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp31) #4
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp30) #4
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp29) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp28) #4
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp27) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp26) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp25) #4
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp24) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp23) #4
- call void @llvm.lifetime.end.p0i8(i64 40000, i8* nonnull %tmp22) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp21) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp20) #4
- call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %tmp19) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp) #4
ret i32 %tmp1065
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-define void @test(i8* %p.base, i8 %x) {
+define void @test(ptr %p.base, i8 %x) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i8 [[X:%.*]], label [[WHILE_END:%.*]] [
; CHECK-NEXT: i8 20, label [[WHILE_BODY_PREHEADER]]
; CHECK-NEXT: ]
; CHECK: while.body.preheader:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[P_BASE:%.*]], i64 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P_BASE:%.*]], i64 1
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i8* [ [[SCEVGEP1:%.*]], [[WHILE_BODY_BACKEDGE:%.*]] ], [ [[SCEVGEP]], [[WHILE_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[Y:%.*]] = load i8, i8* [[LSR_IV]], align 1
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP1:%.*]], [[WHILE_BODY_BACKEDGE:%.*]] ], [ [[SCEVGEP]], [[WHILE_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[Y:%.*]] = load i8, ptr [[LSR_IV]], align 1
; CHECK-NEXT: switch i8 [[Y]], label [[WHILE_END_LOOPEXIT:%.*]] [
; CHECK-NEXT: i8 10, label [[WHILE_BODY_BACKEDGE]]
; CHECK-NEXT: i8 20, label [[WHILE_BODY_BACKEDGE]]
; CHECK-NEXT: ]
; CHECK: while.body.backedge:
-; CHECK-NEXT: [[SCEVGEP1]] = getelementptr i8, i8* [[LSR_IV]], i64 1
+; CHECK-NEXT: [[SCEVGEP1]] = getelementptr i8, ptr [[LSR_IV]], i64 1
; CHECK-NEXT: br label [[WHILE_BODY]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: br label [[WHILE_END]]
]
while.body:
- %p = phi i8* [ %p.inc, %while.body ], [ %p.inc, %while.body ], [ %p.base, %entry ], [ %p.base, %entry ]
- %p.inc = getelementptr inbounds i8, i8* %p, i64 1
- %y = load i8, i8* %p.inc, align 1
+ %p = phi ptr [ %p.inc, %while.body ], [ %p.inc, %while.body ], [ %p.base, %entry ], [ %p.base, %entry ]
+ %p.inc = getelementptr inbounds i8, ptr %p, i64 1
+ %y = load i8, ptr %p.inc, align 1
switch i8 %y, label %while.end [
i8 10, label %while.body
i8 20, label %while.body
declare void @foo(i8)
-define void @not_addressing_mode(i8* %input, i64 %n) {
+define void @not_addressing_mode(ptr %input, i64 %n) {
; CHECK-LABEL: @not_addressing_mode(
entry:
br label %loop
loop:
; CHECK: loop:
-; CHECK: %lsr.iv = phi i8* [ {{%[^,]+}}, %loop ], [ %input, %entry ]
+; CHECK: %lsr.iv = phi ptr [ {{%[^,]+}}, %loop ], [ %input, %entry ]
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i.next = add i64 %i, 1
%j = mul i64 %i, -2
; (%input - 2 * %j) is not foldable. Worth another indvar.
- %p = getelementptr i8, i8* %input, i64 %j
- %v = load i8, i8* %p
-; CHECK: %v = load i8, i8* %lsr.iv
+ %p = getelementptr i8, ptr %input, i64 %j
+ %v = load i8, ptr %p
+; CHECK: %v = load i8, ptr %lsr.iv
call void @foo(i8 %v)
%exitcond = icmp slt i64 %i.next, %n
br i1 %exitcond, label %exit, label %loop
declare i32 @getidx()
-define void @test([10000 x i32]* %P) {
+define void @test(ptr %P) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
%idx = call i32 @getidx( ) ; <i32> [#uses=1]
- %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %INDVAR, i32 %idx ; <i32*> [#uses=1]
- store i32 0, i32* %STRRED
+ %STRRED = getelementptr [10000 x i32], ptr %P, i32 %INDVAR, i32 %idx ; <ptr> [#uses=1]
+ store i32 0, ptr %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br i1 %cond, label %Loop, label %Out
%"class.std::allocator" = type { i8 }
%"class.absl::Storage" = type {}
-define void @0() personality i8* undef {
+define void @0() personality ptr undef {
init1:
- %i14 = invoke i8* undef(i8* null, i8 0)
+ %i14 = invoke ptr undef(ptr null, i8 0)
to label %init2 unwind label %unwind
init2: ; preds = %init1
- %i19 = select i1 undef, %"class.std::allocator"* null, %"class.std::allocator"* null
+ %i19 = select i1 undef, ptr null, ptr null
br label %loop
loop: ; preds = %loop.increment, %init2
%i21 = phi i64 [ %i24, %loop.increment ], [ 0, %init2 ]
- %i22 = getelementptr %"class.std::allocator", %"class.std::allocator"* %i19, i64 %i21
- invoke void undef(i8* null, %"class.std::allocator"* null, %"class.std::allocator"* %i22)
+ %i22 = getelementptr %"class.std::allocator", ptr %i19, i64 %i21
+ invoke void undef(ptr null, ptr null, ptr %i22)
to label %loop.increment unwind label %loop.unwind
loop.increment: ; preds = %loop
catchret from %i28 to label %caught
caught: ; preds = %loop.catch
- invoke void undef(%"class.absl::Storage"* null)
+ invoke void undef(ptr null)
to label %unreach unwind label %unwind
unreach: ; preds = %caught
; This phi node triggers the issue in combination with the optimizable loop
; above. It contains %i19 twice, once from %caught (which doesn't have an
; EHPad) and once from %loop.unwind, which does have one.
- %i32 = phi %"class.std::allocator"* [ %i19, %loop.unwind ], [ %i19, %caught ], [ null, %init1 ]
+ %i32 = phi ptr [ %i19, %loop.unwind ], [ %i19, %caught ], [ null, %init1 ]
%i33 = cleanuppad within none []
cleanupret from %i33 unwind to caller
}
shortcirc_next: ; preds = %no_exit.2, %entry
%indvar37 = phi i32 [ 0, %entry ], [ %indvar.next38, %no_exit.2 ] ; <i32> [#uses=3]
%gep.upgrd.1 = zext i32 %indvar37 to i64 ; <i64> [#uses=1]
- %wp.2.4 = getelementptr i8, i8* null, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %wp.2.4 = getelementptr i8, ptr null, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
br i1 false, label %loopexit.2, label %no_exit.2
no_exit.2: ; preds = %shortcirc_next
%wp.2.4.rec = bitcast i32 %indvar37 to i32 ; <i32> [#uses=1]
%inc.1.rec = add i32 %wp.2.4.rec, 1 ; <i32> [#uses=1]
- %inc.1 = getelementptr i8, i8* null, i32 %inc.1.rec ; <i8*> [#uses=2]
+ %inc.1 = getelementptr i8, ptr null, i32 %inc.1.rec ; <ptr> [#uses=2]
%indvar.next38 = add i32 %indvar37, 1 ; <i32> [#uses=1]
switch i8 0, label %shortcirc_next [
i8 32, label %loopexit.2
i8 59, label %loopexit.2
]
loopexit.2: ; preds = %no_exit.2, %no_exit.2, %shortcirc_next, %entry, %entry
- %wp.2.7 = phi i8* [ null, %entry ], [ null, %entry ], [ %wp.2.4, %shortcirc_next ], [ %inc.1, %no_exit.2 ], [ %inc.1, %no_exit.2 ] ; <i8*> [#uses=0]
+ %wp.2.7 = phi ptr [ null, %entry ], [ null, %entry ], [ %wp.2.4, %shortcirc_next ], [ %inc.1, %no_exit.2 ], [ %inc.1, %no_exit.2 ] ; <ptr> [#uses=0]
ret void
}
; CHECK: incdec.ptr1 =
; CHECK: incdec.ptr2 =
; CHECK: dec =
-define void @f(float* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c, i32 %n) {
+define void @f(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %n) {
entry:
br label %while.cond
while.cond: ; preds = %while.body, %entry
- %a.addr.0 = phi float* [ %a, %entry ], [ %incdec.ptr, %while.body ]
- %b.addr.0 = phi float* [ %b, %entry ], [ %incdec.ptr1, %while.body ]
- %c.addr.0 = phi float* [ %c, %entry ], [ %incdec.ptr2, %while.body ]
+ %a.addr.0 = phi ptr [ %a, %entry ], [ %incdec.ptr, %while.body ]
+ %b.addr.0 = phi ptr [ %b, %entry ], [ %incdec.ptr1, %while.body ]
+ %c.addr.0 = phi ptr [ %c, %entry ], [ %incdec.ptr2, %while.body ]
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %while.body ]
%cmp = icmp sgt i32 %n.addr.0, 0
br i1 %cmp, label %while.body, label %while.end
while.body: ; preds = %while.cond
- %incdec.ptr = getelementptr inbounds float, float* %a.addr.0, i32 1
- %tmp = load float, float* %a.addr.0, align 4
- %incdec.ptr1 = getelementptr inbounds float, float* %b.addr.0, i32 1
- %tmp1 = load float, float* %b.addr.0, align 4
+ %incdec.ptr = getelementptr inbounds float, ptr %a.addr.0, i32 1
+ %tmp = load float, ptr %a.addr.0, align 4
+ %incdec.ptr1 = getelementptr inbounds float, ptr %b.addr.0, i32 1
+ %tmp1 = load float, ptr %b.addr.0, align 4
%add = fadd float %tmp, %tmp1
- %incdec.ptr2 = getelementptr inbounds float, float* %c.addr.0, i32 1
- store float %add, float* %c.addr.0, align 4
+ %incdec.ptr2 = getelementptr inbounds float, ptr %c.addr.0, i32 1
+ store float %add, ptr %c.addr.0, align 4
%dec = add nsw i32 %n.addr.0, -1
br label %while.cond
%struct.nsTArray = type { i8 }
%struct.nsTArrayHeader = type { i32 }
-define void @_Z6foobarR8nsTArray(%struct.nsTArray* %aValues, i32 %foo, %struct.nsTArrayHeader* %bar) nounwind {
+define void @_Z6foobarR8nsTArray(ptr %aValues, i32 %foo, ptr %bar) nounwind {
entry:
br label %for.body
for.body: ; preds = %_ZN8nsTArray9ElementAtEi.exit, %entry
%i.06 = phi i32 [ %add, %_ZN8nsTArray9ElementAtEi.exit ], [ 0, %entry ]
- %call.i = call %struct.nsTArrayHeader* @_ZN8nsTArray4Hdr2Ev() nounwind
- %add.ptr.i = getelementptr inbounds %struct.nsTArrayHeader, %struct.nsTArrayHeader* %call.i, i32 1
- %tmp = bitcast %struct.nsTArrayHeader* %add.ptr.i to %struct.nsTArray*
- %arrayidx = getelementptr inbounds %struct.nsTArray, %struct.nsTArray* %tmp, i32 %i.06
+ %call.i = call ptr @_ZN8nsTArray4Hdr2Ev() nounwind
+ %add.ptr.i = getelementptr inbounds %struct.nsTArrayHeader, ptr %call.i, i32 1
+ %arrayidx = getelementptr inbounds %struct.nsTArray, ptr %add.ptr.i, i32 %i.06
%add = add nsw i32 %i.06, 1
- call void @llvm.dbg.value(metadata %struct.nsTArray* %aValues, metadata !0, metadata !DIExpression()) nounwind, !dbg !DILocation(scope: !1)
+ call void @llvm.dbg.value(metadata ptr %aValues, metadata !0, metadata !DIExpression()) nounwind, !dbg !DILocation(scope: !1)
br label %_ZN8nsTArray9ElementAtEi.exit
_ZN8nsTArray9ElementAtEi.exit: ; preds = %for.body
- %arrayidx.i = getelementptr inbounds %struct.nsTArray, %struct.nsTArray* %tmp, i32 %add
- call void @_ZN11nsTArray15ComputeDistanceERKS_Rd(%struct.nsTArray* %arrayidx, %struct.nsTArray* %arrayidx.i) nounwind
+ %arrayidx.i = getelementptr inbounds %struct.nsTArray, ptr %add.ptr.i, i32 %add
+ call void @_ZN11nsTArray15ComputeDistanceERKS_Rd(ptr %arrayidx, ptr %arrayidx.i) nounwind
%cmp = icmp slt i32 %add, %foo
br i1 %cmp, label %for.body, label %for.end
ret void
}
-declare void @_ZN11nsTArray15ComputeDistanceERKS_Rd(%struct.nsTArray*, %struct.nsTArray*)
+declare void @_ZN11nsTArray15ComputeDistanceERKS_Rd(ptr, ptr)
-declare %struct.nsTArrayHeader* @_ZN8nsTArray4Hdr2Ev()
+declare ptr @_ZN8nsTArray4Hdr2Ev()
declare void @llvm.dbg.value(metadata, metadata, metadata) nounwind readnone
while.cond40.preheader:
br label %while.cond40
while.cond40:
- %indvars.iv194 = phi i8* [ null, %while.cond40.preheader ], [ %scevgep, %while.body51 ]
- %tmp.1 = phi i8* [ undef, %while.cond40.preheader ], [ %incdec.ptr, %while.body51 ]
+ %indvars.iv194 = phi ptr [ null, %while.cond40.preheader ], [ %scevgep, %while.body51 ]
+ %tmp.1 = phi ptr [ undef, %while.cond40.preheader ], [ %incdec.ptr, %while.body51 ]
switch i8 undef, label %while.body51 [
i8 0, label %if.then59
]
while.body51: ; preds = %land.end50
- %incdec.ptr = getelementptr inbounds i8, i8* %tmp.1, i64 1
- %scevgep = getelementptr i8, i8* %indvars.iv194, i64 1
+ %incdec.ptr = getelementptr inbounds i8, ptr %tmp.1, i64 1
+ %scevgep = getelementptr i8, ptr %indvars.iv194, i64 1
br label %while.cond40
if.then59: ; preds = %while.end
br i1 undef, label %if.then64, label %if.end113
if.then64: ; preds = %if.then59
- %incdec.ptr88.tmp.2 = select i1 undef, i8* undef, i8* undef
+ %incdec.ptr88.tmp.2 = select i1 undef, ptr undef, ptr undef
br label %if.end113
if.end113: ; preds = %if.then64, %if.then59
- %tmp.4 = phi i8* [ %incdec.ptr88.tmp.2, %if.then64 ], [ undef, %if.then59 ]
- %tmp.4195 = ptrtoint i8* %tmp.4 to i64
+ %tmp.4 = phi ptr [ %incdec.ptr88.tmp.2, %if.then64 ], [ undef, %if.then59 ]
+ %tmp.4195 = ptrtoint ptr %tmp.4 to i64
br label %while.cond132.preheader
while.cond132.preheader: ; preds = %if.end113
- %cmp133173 = icmp eq i8* %tmp.1, %tmp.4
+ %cmp133173 = icmp eq ptr %tmp.1, %tmp.4
br i1 %cmp133173, label %while.cond40.preheader, label %while.body139.lr.ph
while.body139.lr.ph: ; preds = %while.cond132.preheader
- %scevgep198 = getelementptr i8, i8* %indvars.iv194, i64 0
- %scevgep198199 = ptrtoint i8* %scevgep198 to i64
+ %scevgep198199 = ptrtoint ptr %indvars.iv194 to i64
br label %while.body139
while.body139: ; preds = %while.body139, %while.body139.lr.ph
- %start_of_var.0177 = phi i8* [ %tmp.1, %while.body139.lr.ph ], [ null, %while.body139 ]
+ %start_of_var.0177 = phi ptr [ %tmp.1, %while.body139.lr.ph ], [ null, %while.body139 ]
br i1 undef, label %while.cond132.while.cond.loopexit_crit_edge, label %while.body139
}
br i1 %tobool, label %for.cond, label %for.end
for.end:
-; CHECK: %tmp1 = load i32, i32* @d, align 4
-; CHECK-NEXT: %tmp2 = load i32, i32* @d, align 4
+; CHECK: %tmp1 = load i32, ptr @d, align 4
+; CHECK-NEXT: %tmp2 = load i32, ptr @d, align 4
; CHECK-NEXT: %0 = sub i32 %tmp1, %tmp2
- %tmp1 = load i32, i32* @d, align 4
+ %tmp1 = load i32, ptr @d, align 4
%add = add nsw i32 %tmp1, %g.0
- %tmp2 = load i32, i32* @d, align 4
+ %tmp2 = load i32, ptr @d, align 4
%tobool26 = icmp eq i32 %x, 0
br i1 %tobool26, label %for.end5, label %for.body.lr.ph
; Function Attrs: nounwind optsize ssp uwtable
define i32 @main() #0 {
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 0), align 4, !tbaa !1
+ %0 = load i32, ptr @a, align 4, !tbaa !1
%tobool7.i = icmp eq i32 %0, 0
- %.promoted.i = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 2), align 4, !tbaa !6
- %f.promoted.i = load i32, i32* @f, align 4, !tbaa !7
+ %.promoted.i = load i32, ptr getelementptr inbounds (%struct.anon, ptr @a, i64 0, i32 2), align 4, !tbaa !6
+ %f.promoted.i = load i32, ptr @f, align 4, !tbaa !7
br label %for.body6.i.outer
for.body6.i.outer: ; preds = %entry, %lor.end.i
br i1 %tobool12.i, label %lor.rhs.i, label %lor.end.i
lor.rhs.i: ; preds = %if.end9.i
- %1 = load i32, i32* @b, align 4, !tbaa !7
+ %1 = load i32, ptr @b, align 4, !tbaa !7
%dec.i = add nsw i32 %1, -1
- store i32 %dec.i, i32* @b, align 4, !tbaa !7
+ store i32 %dec.i, ptr @b, align 4, !tbaa !7
%tobool13.i = icmp ne i32 %1, 0
br label %lor.end.i
br i1 %cmp.i, label %for.body6.i.outer, label %fn1.exit
fn1.exit: ; preds = %lor.end.i
- store i32 0, i32* @g, align 4, !tbaa !7
- store i32 %or14.i, i32* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 2), align 4, !tbaa !6
- store i32 %or15.i, i32* @f, align 4, !tbaa !7
- store i32 %add.i, i32* getelementptr inbounds (%struct.anon, %struct.anon* @e, i64 0, i32 1), align 4, !tbaa !8
- store i32 0, i32* @h, align 4, !tbaa !7
- %3 = load i32, i32* @b, align 4, !tbaa !7
- %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %3) #2
+ store i32 0, ptr @g, align 4, !tbaa !7
+ store i32 %or14.i, ptr getelementptr inbounds (%struct.anon, ptr @a, i64 0, i32 2), align 4, !tbaa !6
+ store i32 %or15.i, ptr @f, align 4, !tbaa !7
+ store i32 %add.i, ptr getelementptr inbounds (%struct.anon, ptr @e, i64 0, i32 1), align 4, !tbaa !8
+ store i32 0, ptr @h, align 4, !tbaa !7
+ %3 = load i32, ptr @b, align 4, !tbaa !7
+ %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %3) #2
ret i32 0
}
; Function Attrs: nounwind optsize
-declare i32 @printf(i8* nocapture readonly, ...) #1
+declare i32 @printf(ptr nocapture readonly, ...) #1
attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
-define void @f() personality i32 (...)* @__CxxFrameHandler3 {
+define void @f() personality ptr @__CxxFrameHandler3 {
entry:
br label %for.cond.i
for.cond.i: ; preds = %for.inc.i, %entry
- %_First.addr.0.i = phi i32* [ null, %entry ], [ %incdec.ptr.i, %for.inc.i ]
+ %_First.addr.0.i = phi ptr [ null, %entry ], [ %incdec.ptr.i, %for.inc.i ]
invoke void @g()
to label %for.inc.i unwind label %catch.dispatch.i
%cs = catchswitch within none [label %for.cond.1.preheader.i] unwind to caller
for.cond.1.preheader.i: ; preds = %catch.dispatch.i
- %0 = catchpad within %cs [i8* null, i32 64, i8* null]
- %cmp.i = icmp eq i32* %_First.addr.0.i, null
+ %0 = catchpad within %cs [ptr null, i32 64, ptr null]
+ %cmp.i = icmp eq ptr %_First.addr.0.i, null
br label %for.cond.1.i
for.cond.1.i: ; preds = %for.body.i, %for.cond.1.preheader.i
br label %for.cond.1.i
for.inc.i: ; preds = %for.cond.i
- %incdec.ptr.i = getelementptr inbounds i32, i32* %_First.addr.0.i, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, ptr %_First.addr.0.i, i64 1
br label %for.cond.i
for.end.i: ; preds = %for.cond.1.i
; CHECK-LABEL: define void @f(
; CHECK: %[[PHI:.*]] = phi i64 [ %[[IV_NEXT:.*]], {{.*}} ], [ 0, {{.*}} ]
-; CHECK: %[[ITOP:.*]] = inttoptr i64 %[[PHI]] to i32*
-; CHECK: %[[CMP:.*]] = icmp eq i32* %[[ITOP]], null
+; CHECK: %[[ITOP:.*]] = inttoptr i64 %[[PHI]] to ptr
+; CHECK: %[[CMP:.*]] = icmp eq ptr %[[ITOP]], null
; CHECK: %[[IV_NEXT]] = add i64 %[[PHI]], -4
declare void @g()
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
-@g_14 = internal global i32 1 ; <i32*> [#uses=1]
-@g_39 = internal global i16 -5 ; <i16*> [#uses=2]
-@g_43 = internal global i32 -6 ; <i32*> [#uses=3]
-@g_33 = internal global i32 -1269044541 ; <i32*> [#uses=1]
-@g_137 = internal global i32 8 ; <i32*> [#uses=1]
-@g_82 = internal global i32 -5 ; <i32*> [#uses=3]
-@g_91 = internal global i32 1 ; <i32*> [#uses=1]
-@g_197 = internal global i32 1 ; <i32*> [#uses=4]
-@g_207 = internal global i32 1 ; <i32*> [#uses=2]
-@g_222 = internal global i16 4165 ; <i16*> [#uses=1]
-@g_247 = internal global i8 -21 ; <i8*> [#uses=2]
-@g_260 = internal global i32 1 ; <i32*> [#uses=2]
-@g_221 = internal global i16 -17503 ; <i16*> [#uses=3]
-@g_267 = internal global i16 1 ; <i16*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 (i32, i32, i16, i32, i8, i32)* @func_44 to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
+@g_14 = internal global i32 1 ; <ptr> [#uses=1]
+@g_39 = internal global i16 -5 ; <ptr> [#uses=2]
+@g_43 = internal global i32 -6 ; <ptr> [#uses=3]
+@g_33 = internal global i32 -1269044541 ; <ptr> [#uses=1]
+@g_137 = internal global i32 8 ; <ptr> [#uses=1]
+@g_82 = internal global i32 -5 ; <ptr> [#uses=3]
+@g_91 = internal global i32 1 ; <ptr> [#uses=1]
+@g_197 = internal global i32 1 ; <ptr> [#uses=4]
+@g_207 = internal global i32 1 ; <ptr> [#uses=2]
+@g_222 = internal global i16 4165 ; <ptr> [#uses=1]
+@g_247 = internal global i8 -21 ; <ptr> [#uses=2]
+@g_260 = internal global i32 1 ; <ptr> [#uses=2]
+@g_221 = internal global i16 -17503 ; <ptr> [#uses=3]
+@g_267 = internal global i16 1 ; <ptr> [#uses=1]
+@llvm.used = appending global [1 x ptr] [ ptr @func_44 ], section "llvm.metadata" ; <ptr> [#uses=0]
define i32 @func_44(i32 %p_45, i32 %p_46, i16 zeroext %p_48, i32 %p_49, i8 zeroext %p_50, i32 %p_52) nounwind {
entry:
tail call i32 @func_116( i8 zeroext 2 ) nounwind ; <i32>:0 [#uses=0]
tail call i32 @func_63( i16 signext 2 ) nounwind ; <i32>:1 [#uses=1]
- load i16, i16* @g_39, align 2 ; <i16>:2 [#uses=1]
+ load i16, ptr @g_39, align 2 ; <i16>:2 [#uses=1]
tail call i32 @func_63( i16 signext %2 ) nounwind ; <i32>:3 [#uses=1]
trunc i32 %3 to i16 ; <i16>:4 [#uses=1]
and i16 %4, 1 ; <i16>:5 [#uses=1]
tail call i32 @func_74( i16 zeroext %5, i8 zeroext %6, i16 zeroext %7, i16 zeroext 0 ) nounwind ; <i32>:8 [#uses=0]
tail call i32 @func_124( i32 544824386 ) nounwind ; <i32>:9 [#uses=0]
zext i8 %p_50 to i32 ; <i32>:10 [#uses=1]
- load i32, i32* @g_43, align 4 ; <i32>:11 [#uses=1]
+ load i32, ptr @g_43, align 4 ; <i32>:11 [#uses=1]
icmp sle i32 %10, %11 ; <i1>:12 [#uses=1]
zext i1 %12 to i32 ; <i32>:13 [#uses=2]
- load i8, i8* @g_247, align 1 ; <i8>:14 [#uses=1]
+ load i8, ptr @g_247, align 1 ; <i8>:14 [#uses=1]
trunc i32 %p_45 to i16 ; <i16>:15 [#uses=1]
zext i8 %14 to i16 ; <i16>:16 [#uses=1]
tail call i32 @func_74( i16 zeroext %15, i8 zeroext 0, i16 zeroext %16, i16 zeroext 23618 ) nounwind ; <i32>:17 [#uses=4]
icmp eq i32 %.0343, 0 ; <i1>:30 [#uses=1]
%.0341 = select i1 %30, i32 1, i32 %.0343 ; <i32> [#uses=1]
urem i32 %23, %.0341 ; <i32>:31 [#uses=1]
- load i32, i32* @g_137, align 4 ; <i32>:32 [#uses=4]
+ load i32, ptr @g_137, align 4 ; <i32>:32 [#uses=4]
icmp slt i32 %32, 0 ; <i1>:33 [#uses=1]
br i1 %33, label %bb202, label %bb198
bb223: ; preds = %bb222, %bb215
%iftmp.437.0 = phi i32 [ 0, %bb222 ], [ 1, %bb215 ] ; <i32> [#uses=1]
- load i32, i32* @g_91, align 4 ; <i32>:55 [#uses=3]
+ load i32, ptr @g_91, align 4 ; <i32>:55 [#uses=3]
tail call i32 @func_103( i16 zeroext 4 ) nounwind ; <i32>:56 [#uses=0]
tail call i32 @func_112( i32 0, i16 zeroext -31374 ) nounwind ; <i32>:57 [#uses=0]
- load i32, i32* @g_197, align 4 ; <i32>:58 [#uses=1]
+ load i32, ptr @g_197, align 4 ; <i32>:58 [#uses=1]
tail call i32 @func_124( i32 28156 ) nounwind ; <i32>:59 [#uses=1]
- load i32, i32* @g_260, align 4 ; <i32>:60 [#uses=1]
- load i32, i32* @g_43, align 4 ; <i32>:61 [#uses=1]
+ load i32, ptr @g_260, align 4 ; <i32>:60 [#uses=1]
+ load i32, ptr @g_43, align 4 ; <i32>:61 [#uses=1]
xor i32 %61, %60 ; <i32>:62 [#uses=1]
mul i32 %62, %59 ; <i32>:63 [#uses=1]
trunc i32 %63 to i8 ; <i8>:64 [#uses=1]
%or.cond352 = or i1 %70, %67 ; <i1> [#uses=1]
select i1 %or.cond352, i32 0, i32 %55 ; <i32>:71 [#uses=1]
%.353 = ashr i32 %66, %71 ; <i32> [#uses=2]
- load i16, i16* @g_221, align 2 ; <i16>:72 [#uses=1]
+ load i16, ptr @g_221, align 2 ; <i16>:72 [#uses=1]
zext i16 %72 to i32 ; <i32>:73 [#uses=1]
icmp ugt i32 %.353, 31 ; <i1>:74 [#uses=1]
select i1 %74, i32 0, i32 %.353 ; <i32>:75 [#uses=1]
add i32 %.0323, %iftmp.437.0 ; <i32>:76 [#uses=1]
and i32 %48, 255 ; <i32>:77 [#uses=2]
add i32 %77, 2042556439 ; <i32>:78 [#uses=1]
- load i32, i32* @g_207, align 4 ; <i32>:79 [#uses=2]
+ load i32, ptr @g_207, align 4 ; <i32>:79 [#uses=2]
icmp ugt i32 %79, 31 ; <i1>:80 [#uses=1]
select i1 %80, i32 0, i32 %79 ; <i32>:81 [#uses=1]
%.0320 = lshr i32 %77, %81 ; <i32> [#uses=1]
zext i1 %82 to i8 ; <i8>:83 [#uses=1]
tail call i32 @func_25( i8 zeroext %83 ) nounwind ; <i32>:84 [#uses=1]
xor i32 %84, 1 ; <i32>:85 [#uses=1]
- load i32, i32* @g_197, align 4 ; <i32>:86 [#uses=1]
+ load i32, ptr @g_197, align 4 ; <i32>:86 [#uses=1]
add i32 %86, 1 ; <i32>:87 [#uses=1]
add i32 %87, %85 ; <i32>:88 [#uses=1]
icmp ugt i32 %76, %88 ; <i1>:89 [#uses=1]
br i1 %89, label %bb241, label %bb311
bb241: ; preds = %bb223
- store i16 -9, i16* @g_221, align 2
+ store i16 -9, ptr @g_221, align 2
udiv i32 %p_52, 1538244727 ; <i32>:90 [#uses=1]
- load i32, i32* @g_207, align 4 ; <i32>:91 [#uses=1]
+ load i32, ptr @g_207, align 4 ; <i32>:91 [#uses=1]
sub i32 %91, %90 ; <i32>:92 [#uses=1]
- load i32, i32* @g_14, align 4 ; <i32>:93 [#uses=1]
+ load i32, ptr @g_14, align 4 ; <i32>:93 [#uses=1]
trunc i32 %93 to i16 ; <i16>:94 [#uses=1]
trunc i32 %p_46 to i16 ; <i16>:95 [#uses=2]
sub i16 %94, %95 ; <i16>:96 [#uses=1]
- load i32, i32* @g_197, align 4 ; <i32>:97 [#uses=1]
+ load i32, ptr @g_197, align 4 ; <i32>:97 [#uses=1]
trunc i32 %97 to i16 ; <i16>:98 [#uses=1]
tail call i32 @func_55( i32 -346178830, i16 zeroext %98, i16 zeroext %95 ) nounwind ; <i32>:99 [#uses=0]
zext i16 %p_48 to i32 ; <i32>:100 [#uses=1]
- load i8, i8* @g_247, align 1 ; <i8>:101 [#uses=1]
+ load i8, ptr @g_247, align 1 ; <i8>:101 [#uses=1]
zext i8 %101 to i32 ; <i32>:102 [#uses=1]
sub i32 %100, %102 ; <i32>:103 [#uses=1]
tail call i32 @func_55( i32 %103, i16 zeroext -2972, i16 zeroext %96 ) nounwind ; <i32>:104 [#uses=0]
xor i32 %92, 2968 ; <i32>:105 [#uses=1]
- load i32, i32* @g_197, align 4 ; <i32>:106 [#uses=1]
+ load i32, ptr @g_197, align 4 ; <i32>:106 [#uses=1]
icmp ugt i32 %105, %106 ; <i1>:107 [#uses=1]
zext i1 %107 to i32 ; <i32>:108 [#uses=1]
- store i32 %108, i32* @g_33, align 4
+ store i32 %108, ptr @g_33, align 4
br label %bb248
bb248: ; preds = %bb284, %bb241
br i1 %110, label %bb272.thread, label %bb255.thread
bb272.thread: ; preds = %bb248
- store i32 1, i32* @g_82
- load i16, i16* @g_267, align 2 ; <i16>:111 [#uses=1]
+ store i32 1, ptr @g_82
+ load i16, ptr @g_267, align 2 ; <i16>:111 [#uses=1]
icmp eq i16 %111, 0 ; <i1>:112 [#uses=1]
br i1 %112, label %bb311.loopexit.split, label %bb268
bb255.thread: ; preds = %bb248
- load i32, i32* @g_260, align 4 ; <i32>:113 [#uses=1]
+ load i32, ptr @g_260, align 4 ; <i32>:113 [#uses=1]
sub i32 %113, %p_52 ; <i32>:114 [#uses=1]
and i32 %114, -20753 ; <i32>:115 [#uses=1]
icmp ne i32 %115, 0 ; <i1>:116 [#uses=1]
zext i1 %116 to i16 ; <i16>:117 [#uses=1]
- store i16 %117, i16* @g_221, align 2
+ store i16 %117, ptr @g_221, align 2
br label %bb284
bb268: ; preds = %bb268, %bb272.thread
br i1 %121, label %bb268, label %bb274.split
bb274.split: ; preds = %bb268
- store i32 %120, i32* @g_82
+ store i32 %120, ptr @g_82
br i1 %50, label %bb279, label %bb276
bb276: ; preds = %bb274.split
- store i16 0, i16* @g_222, align 2
+ store i16 0, ptr @g_222, align 2
br label %bb284
bb279: ; preds = %bb274.split
%p_49_addr.0 = phi i32 [ %p_49_addr.1.reg2mem.0, %bb279 ], [ %p_49_addr.1.reg2mem.0, %bb276 ], [ 0, %bb255.thread ] ; <i32> [#uses=1]
%p_48_addr.1 = phi i16 [ %124, %bb279 ], [ %118, %bb276 ], [ %p_48_addr.2.reg2mem.0, %bb255.thread ] ; <i16> [#uses=1]
%p_45_addr.0 = phi i32 [ %p_45_addr.1.reg2mem.0, %bb279 ], [ %p_45_addr.1.reg2mem.0, %bb276 ], [ 8, %bb255.thread ] ; <i32> [#uses=3]
- load i32, i32* @g_43, align 4 ; <i32>:125 [#uses=1]
+ load i32, ptr @g_43, align 4 ; <i32>:125 [#uses=1]
trunc i32 %125 to i8 ; <i8>:126 [#uses=1]
tail call i32 @func_116( i8 zeroext %126 ) nounwind ; <i32>:127 [#uses=0]
lshr i32 65255, %p_45_addr.0 ; <i32>:128 [#uses=1]
%.op = lshr i32 %128, 31 ; <i32> [#uses=1]
%.op.op = xor i32 %.op, 1 ; <i32> [#uses=1]
%.354..lobit.not = select i1 %129, i32 1, i32 %.op.op ; <i32> [#uses=1]
- load i16, i16* @g_39, align 2 ; <i16>:130 [#uses=1]
+ load i16, ptr @g_39, align 2 ; <i16>:130 [#uses=1]
zext i16 %130 to i32 ; <i32>:131 [#uses=1]
icmp slt i32 %.354..lobit.not, %131 ; <i1>:132 [#uses=1]
zext i1 %132 to i32 ; <i32>:133 [#uses=1]
ret i32 %40
bb311.loopexit.split: ; preds = %bb272.thread
- store i32 1, i32* @g_82
+ store i32 1, ptr @g_82
ret i32 1
bb311: ; preds = %bb223, %bb202
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc18.0.0"
-%struct.L = type { i8, i8* }
+%struct.L = type { i8, ptr }
declare i32 @__CxxFrameHandler3(...)
-@GV1 = external global %struct.L*
+@GV1 = external global ptr
@GV2 = external global %struct.L
-define void @b_copy_ctor() personality i32 (...)* @__CxxFrameHandler3 {
+define void @b_copy_ctor() personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @b_copy_ctor(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load %struct.L*, %struct.L** @GV1, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast %struct.L* [[TMP0]] to i8*
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr @GV1, align 8
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[CALL_I_NOEXC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[LSR_IV2:%.*]] = inttoptr i64 [[LSR_IV]] to %struct.L*
+; CHECK-NEXT: [[LSR_IV2:%.*]] = inttoptr i64 [[LSR_IV]] to ptr
; CHECK-NEXT: invoke void @a_copy_ctor()
; CHECK-NEXT: to label [[CALL_I_NOEXC]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: call.i.noexc:
; CHECK: catch.dispatch:
; CHECK-NEXT: [[TMP2:%.*]] = catchswitch within none [label %catch] unwind to caller
; CHECK: catch:
-; CHECK-NEXT: [[TMP3:%.*]] = catchpad within [[TMP2]] [i8* null, i32 64, i8* null]
-; CHECK-NEXT: [[CMP16:%.*]] = icmp eq %struct.L* [[LSR_IV2]], null
+; CHECK-NEXT: [[TMP3:%.*]] = catchpad within [[TMP2]] [ptr null, i32 64, ptr null]
+; CHECK-NEXT: [[CMP16:%.*]] = icmp eq ptr [[LSR_IV2]], null
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[LSR_IV]], -1
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, i8* [[TMP1]], i64 [[TMP4]]
-; CHECK-NEXT: [[UGLYGEP1:%.*]] = bitcast i8* [[UGLYGEP]] to %struct.L*
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[TMP4]]
; CHECK-NEXT: br i1 [[CMP16]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq %struct.L* [[UGLYGEP1]], @GV2
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[UGLYGEP]], @GV2
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK-NEXT: ret void
;
entry:
- %0 = load %struct.L*, %struct.L** @GV1, align 8
+ %0 = load ptr, ptr @GV1, align 8
br label %for.cond
for.cond: ; preds = %call.i.noexc, %entry
- %d.0 = phi %struct.L* [ %0, %entry ], [ %incdec.ptr, %call.i.noexc ]
+ %d.0 = phi ptr [ %0, %entry ], [ %incdec.ptr, %call.i.noexc ]
invoke void @a_copy_ctor()
to label %call.i.noexc unwind label %catch.dispatch
call.i.noexc: ; preds = %for.cond
- %incdec.ptr = getelementptr inbounds %struct.L, %struct.L* %d.0, i64 1
+ %incdec.ptr = getelementptr inbounds %struct.L, ptr %d.0, i64 1
br label %for.cond
catch.dispatch: ; preds = %for.cond
%1 = catchswitch within none [label %catch] unwind to caller
catch: ; preds = %catch.dispatch
- %2 = catchpad within %1 [i8* null, i32 64, i8* null]
- %cmp16 = icmp eq %struct.L* %0, %d.0
+ %2 = catchpad within %1 [ptr null, i32 64, ptr null]
+ %cmp16 = icmp eq ptr %0, %d.0
br i1 %cmp16, label %for.end, label %for.body
for.body: ; preds = %for.body, %catch
- %cmp = icmp eq %struct.L* @GV2, %d.0
+ %cmp = icmp eq ptr @GV2, %d.0
br i1 %cmp, label %for.end, label %for.body
for.end: ; preds = %for.body, %catch
; RUN: opt < %s -passes='require<scalar-evolution>'
; PR 3086
- %struct.Cls = type { i32, i8, [2 x %struct.Cls*], [2 x %struct.Lit*] }
+ %struct.Cls = type { i32, i8, [2 x ptr], [2 x ptr] }
%struct.Lit = type { i8 }
define fastcc i64 @collect_clauses() nounwind {
br label %bb11
bb5: ; preds = %bb9
- %0 = load %struct.Lit*, %struct.Lit** %storemerge, align 8 ; <%struct.Lit*> [#uses=0]
+ %0 = load ptr, ptr %storemerge, align 8 ; <ptr> [#uses=0]
%indvar.next8 = add i64 %storemerge.rec, 1 ; <i64> [#uses=1]
br label %bb9
bb9: ; preds = %bb22, %bb5
%storemerge.rec = phi i64 [ %indvar.next8, %bb5 ], [ 0, %bb22 ] ; <i64> [#uses=2]
- %storemerge = getelementptr %struct.Lit*, %struct.Lit** null, i64 %storemerge.rec ; <%struct.Lit**> [#uses=2]
- %1 = icmp ugt %struct.Lit** null, %storemerge ; <i1> [#uses=1]
+ %storemerge = getelementptr ptr, ptr null, i64 %storemerge.rec ; <ptr> [#uses=2]
+ %1 = icmp ugt ptr null, %storemerge ; <i1> [#uses=1]
br i1 %1, label %bb5, label %bb22
bb11: ; preds = %bb22, %entry
- %2 = load %struct.Cls*, %struct.Cls** null, align 8 ; <%struct.Cls*> [#uses=0]
+ %2 = load ptr, ptr null, align 8 ; <ptr> [#uses=0]
br label %bb22
bb22: ; preds = %bb11, %bb9
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc19.0.24215"
-define void @fn3() personality i32 (...)* @__CxxFrameHandler3 {
+define void @fn3() personality ptr @__CxxFrameHandler3 {
entry:
%call = invoke i32 @fn2()
to label %for.cond.preheader unwind label %catch.dispatch2
%0 = catchswitch within none [label %catch] unwind label %catch.dispatch2
catch: ; preds = %catch.dispatch
- %1 = catchpad within %0 [i8* null, i32 64, i8* null]
- invoke void @_CxxThrowException(i8* null, i8* null) #2 [ "funclet"(token %1) ]
+ %1 = catchpad within %0 [ptr null, i32 64, ptr null]
+ invoke void @_CxxThrowException(ptr null, ptr null) #2 [ "funclet"(token %1) ]
to label %unreachable unwind label %catch.dispatch2
catch.dispatch2: ; preds = %catch.dispatch, %catch, %entry
%2 = catchswitch within none [label %catch3] unwind to caller
catch3: ; preds = %catch.dispatch2
- %3 = catchpad within %2 [i8* null, i32 64, i8* null]
+ %3 = catchpad within %2 [ptr null, i32 64, ptr null]
call void @fn1(i32 %a.0) [ "funclet"(token %3) ]
catchret from %3 to label %try.cont4
declare void @fn1(i32)
-declare void @_CxxThrowException(i8*, i8*)
+declare void @_CxxThrowException(ptr, ptr)
; RUN: opt < %s -loop-reduce | llvm-dis
; PR3399
-@g_53 = external global i32 ; <i32*> [#uses=1]
+@g_53 = external global i32 ; <ptr> [#uses=1]
define i32 @foo() nounwind {
bb5.thread:
bb1: ; preds = %bb
%l_2.0.reg2mem.0 = sub i32 0, %indvar ; <i32> [#uses=1]
- %0 = load volatile i32, i32* @g_53, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, ptr @g_53, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %l_2.0.reg2mem.0 to i16 ; <i16> [#uses=1]
%2 = trunc i32 %0 to i16 ; <i16> [#uses=1]
%3 = mul i16 %2, %1 ; <i16> [#uses=1]
_ZNK11QModelIndex7isValidEv.exit.i: ; preds = %bb.i, %entry
%result.0.i = phi i32 [ 0, %entry ], [ %indvar.next, %bb.i ] ; <i32> [#uses=2]
- %0 = load i32*, i32** null, align 4 ; <%struct.QAbstractItemDelegate*> [#uses=0]
+ %0 = load ptr, ptr null, align 4 ; <ptr> [#uses=0]
br i1 false, label %_ZN18qdesigner_internalL5levelEP18QAbstractItemModelRK11QModelIndex.exit, label %bb.i
_ZN18qdesigner_internalL5levelEP18QAbstractItemModelRK11QModelIndex.exit: ; preds = %_ZNK11QModelIndex7isValidEv.exit.i
- %1 = call i32 @_ZNK9QTreeView11indentationEv(i32* null) nounwind ; <i32> [#uses=1]
+ %1 = call i32 @_ZNK9QTreeView11indentationEv(ptr null) nounwind ; <i32> [#uses=1]
%2 = mul i32 %1, %result.0.i ; <i32> [#uses=1]
%3 = add i32 %2, -2 ; <i32> [#uses=1]
%4 = add i32 %3, 0 ; <i32> [#uses=1]
- store i32 %4, i32* null, align 8
+ store i32 %4, ptr null, align 8
unreachable
}
-declare i32 @_ZNK9QTreeView11indentationEv(i32*)
+declare i32 @_ZNK9QTreeView11indentationEv(ptr)
define void @test() {
; CHECK-LABEL: test
bb:
- %tmp = load i32, i32 addrspace(3)* undef, align 4
+ %tmp = load i32, ptr addrspace(3) undef, align 4
br label %bb1
bb1: ; preds = %bb38, %bb
define dso_local i32 @d() local_unnamed_addr #0 !dbg !16 {
entry:
- %b.promoted = load i32, i32* @b, align 4, !tbaa !29
+ %b.promoted = load i32, ptr @b, align 4, !tbaa !29
%mul = mul nsw i32 %b.promoted, %b.promoted, !dbg !33
%mul.1 = mul nsw i32 %mul, %mul, !dbg !33
%mul.2 = mul nsw i32 %mul.1, %mul.1, !dbg !33
%mul.45 = mul nsw i32 %mul.44, %mul.44, !dbg !33
%mul.46 = mul nsw i32 %mul.45, %mul.45, !dbg !33
%mul.47 = mul nsw i32 %mul.46, %mul.46, !dbg !33
- store i32 49, i32* @c, align 4, !dbg !36, !tbaa !29
- store i32 %mul.47, i32* @b, align 4, !dbg !37, !tbaa !29
- %.pr = load i32, i32* @a, align 4, !dbg !38, !tbaa !29
+ store i32 49, ptr @c, align 4, !dbg !36, !tbaa !29
+ store i32 %mul.47, ptr @b, align 4, !dbg !37, !tbaa !29
+ %.pr = load i32, ptr @a, align 4, !dbg !38, !tbaa !29
%tobool.not8 = icmp eq i32 %.pr, 0, !dbg !39
br i1 %tobool.not8, label %for.end3, label %for.body2.preheader, !dbg !39
br i1 %tobool.not, label %for.cond1.for.end3_crit_edge, label %for.body2, !dbg !39, !llvm.loop !42
for.cond1.for.end3_crit_edge: ; preds = %for.body2
- store i32 0, i32* @a, align 4, !dbg !40, !tbaa !29
+ store i32 0, ptr @a, align 4, !dbg !40, !tbaa !29
br label %for.end3, !dbg !39
for.end3: ; preds = %for.cond1.for.end3_crit_edge, %entry
define void @test() {
; CHECK-LABEL: @test(
bb:
- %tmp = load atomic i64, i64 addrspace(1)* undef unordered, align 8
+ %tmp = load atomic i64, ptr addrspace(1) undef unordered, align 8
%tmp1 = sub i64 4294967294, undef
br label %bb5
; *D++ = F;
; }
-define void @foo(double* %D, double* %E, double %F) nounwind {
+define void @foo(ptr %D, ptr %E, double %F) nounwind {
entry:
- %tmp.24 = icmp eq double* %D, %E ; <i1> [#uses=1]
+ %tmp.24 = icmp eq ptr %D, %E ; <i1> [#uses=1]
br i1 %tmp.24, label %return, label %no_exit
no_exit: ; preds = %no_exit, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; <i32> [#uses=2]
%D_addr.0.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %D_addr.0.0 = getelementptr double, double* %D, i32 %D_addr.0.0.rec ; <double*> [#uses=1]
+ %D_addr.0.0 = getelementptr double, ptr %D, i32 %D_addr.0.0.rec ; <ptr> [#uses=1]
%inc.rec = add i32 %D_addr.0.0.rec, 1 ; <i32> [#uses=1]
- %inc = getelementptr double, double* %D, i32 %inc.rec ; <double*> [#uses=1]
- store double %F, double* %D_addr.0.0
- %tmp.2 = icmp eq double* %inc, %E ; <i1> [#uses=1]
+ %inc = getelementptr double, ptr %D, i32 %inc.rec ; <ptr> [#uses=1]
+ store double %F, ptr %D_addr.0.0
+ %tmp.2 = icmp eq ptr %inc, %E ; <i1> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
br i1 %tmp.2, label %return, label %no_exit
return: ; preds = %no_exit, %entry
declare i1 @pred()
-define void @test(i32* %P) {
+define void @test(ptr %P) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
%i = phi i32 [ 0, %0 ], [ %i.next, %Loop ]
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr i32, i32* %P, i32 %INDVAR ; <i32*> [#uses=1]
- store i32 0, i32* %STRRED
+ %STRRED = getelementptr i32, ptr %P, i32 %INDVAR ; <ptr> [#uses=1]
+ store i32 0, ptr %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%i.next = add i32 %i, 1
%cond = call i1 @pred( ) ; <i1> [#uses=1]
; RUN: opt < %s -S -loop-reduce | FileCheck %s
-define void @testIVNext(i64* nocapture %a, i64 signext %m, i64 signext %n) {
+define void @testIVNext(ptr nocapture %a, i64 signext %m, i64 signext %n) {
entry:
br label %for.body
for.body:
%indvars.iv.prol = phi i64 [ %indvars.iv.next.prol, %for.body ], [ %m, %entry ]
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
- %uglygep138 = getelementptr i64, i64* %a, i64 %i
- store i64 55, i64* %uglygep138, align 4
+ %uglygep138 = getelementptr i64, ptr %a, i64 %i
+ store i64 55, ptr %uglygep138, align 4
%indvars.iv.next.prol = add nuw nsw i64 %indvars.iv.prol, 1
%i.next = add i64 %i, 1
%i.cmp.not = icmp eq i64 %i.next, %n
ret void
}
-define void @testIV(i64* nocapture %a, i64 signext %m, i64 signext %n) {
+define void @testIV(ptr nocapture %a, i64 signext %m, i64 signext %n) {
entry:
br label %for.body
for.body:
%iv.prol = phi i64 [ %iv.next.prol, %for.body ], [ %m, %entry ]
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
- %uglygep138 = getelementptr i64, i64* %a, i64 %i
- store i64 55, i64* %uglygep138, align 4
+ %uglygep138 = getelementptr i64, ptr %a, i64 %i
+ store i64 55, ptr %uglygep138, align 4
%iv.next.prol = add nuw nsw i64 %iv.prol, 1
%i.next = add i64 %i, 1
%i.cmp.not = icmp eq i64 %i.next, %n
while.end: ; preds = %while.body
%inc.lcssa = phi i32 [ %depth.04, %while.body ]
- store i32 %inc.lcssa, i32* null, align 4
+ store i32 %inc.lcssa, ptr null, align 4
ret void
}
; <label>:16 ; preds = %16, %15
%17 = phi i32 [ %21, %16 ], [ undef, %15 ]
%18 = sub i32 %17, 1623127498
- %19 = getelementptr inbounds i32, i32* undef, i32 %18
- store i32 undef, i32* %19, align 4
+ %19 = getelementptr inbounds i32, ptr undef, i32 %18
+ store i32 undef, ptr %19, align 4
%20 = add i32 %17, 1623127499
%21 = add i32 %20, -1623127498
%22 = add i32 %21, -542963121
ret void
for.body: ; preds = %for.cond
- %0 = load i16, i16* @a, align 1
+ %0 = load i16, ptr @a, align 1
%cmp = icmp sgt i16 %0, %c.0
br i1 %cmp, label %land.rhs, label %land.end
; See http://reviews.llvm.org/D20703 for context.
define void @test() {
entry:
- %bf.load = load i32, i32* null, align 4
+ %bf.load = load i32, ptr null, align 4
%bf.clear = lshr i32 %bf.load, 1
%div = and i32 %bf.clear, 134217727
%sub = add nsw i32 %div, -1
; RUN: opt < %s -loop-reduce -S | grep mul | count 1
; LSR should not make two copies of the Q*L expression in the preheader!
-define i8 @test(i8* %A, i8* %B, i32 %L, i32 %Q, i32 %N.s) {
+define i8 @test(ptr %A, ptr %B, i32 %L, i32 %Q, i32 %N.s) {
entry:
%tmp.6 = mul i32 %Q, %L ; <i32> [#uses=1]
%N = bitcast i32 %N.s to i32 ; <i32> [#uses=1]
%indvar = bitcast i32 %indvar.ui to i32 ; <i32> [#uses=1]
%N_addr.0.0 = sub i32 %N.s, %indvar ; <i32> [#uses=1]
%tmp.8 = add i32 %N_addr.0.0, %tmp.6 ; <i32> [#uses=2]
- %tmp.9 = getelementptr i8, i8* %A, i32 %tmp.8 ; <i8*> [#uses=1]
- %tmp.10 = load i8, i8* %tmp.9 ; <i8> [#uses=1]
- %tmp.17 = getelementptr i8, i8* %B, i32 %tmp.8 ; <i8*> [#uses=1]
- %tmp.18 = load i8, i8* %tmp.17 ; <i8> [#uses=1]
+ %tmp.9 = getelementptr i8, ptr %A, i32 %tmp.8 ; <ptr> [#uses=1]
+ %tmp.10 = load i8, ptr %tmp.9 ; <i8> [#uses=1]
+ %tmp.17 = getelementptr i8, ptr %B, i32 %tmp.8 ; <ptr> [#uses=1]
+ %tmp.18 = load i8, ptr %tmp.17 ; <i8> [#uses=1]
%tmp.19 = sub i8 %tmp.10, %tmp.18 ; <i8> [#uses=1]
%tmp.21 = add i8 %tmp.19, %Sum.0.0 ; <i8> [#uses=2]
%indvar.next = add i32 %indvar.ui, 1 ; <i32> [#uses=2]
%struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44 = type { double, double, double, double, double, double, double }
; Function Attrs: nounwind uwtable
-define dso_local void @advance(i32 %nbodies, %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44* nocapture %bodies) local_unnamed_addr #0 {
+define dso_local void @advance(i32 %nbodies, ptr nocapture %bodies) local_unnamed_addr #0 {
; CHECK-LABEL: @advance(
; CHECK: for.cond.loopexit:
; CHECK: [[LSR_IV_NEXT:%.*]] = add i64 [[LSR_IV:%.*]], -1
for.body3: ; preds = %for.body3, %for.body
%indvars.iv98 = phi i64 [ %indvars.iv, %for.body ], [ %indvars.iv.next99, %for.body3 ]
- %z9 = getelementptr inbounds %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44, %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44* %bodies, i64 %indvars.iv98, i32 2
- %tmp = load double, double* %z9, align 8, !tbaa !0
+ %z9 = getelementptr inbounds %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44, ptr %bodies, i64 %indvars.iv98, i32 2
+ %tmp = load double, ptr %z9, align 8, !tbaa !0
%indvars.iv.next99 = add nuw nsw i64 %indvars.iv98, 1
%exitcond = icmp eq i64 %indvars.iv.next99, %wide.trip.count
br i1 %exitcond, label %for.cond.loopexit, label %for.body3
declare i1 @pred(i32)
-define void @test([700 x i32]* %nbeaux_.0__558, i32* %i_.16574) {
+define void @test(ptr %nbeaux_.0__558, ptr %i_.16574) {
then.0:
br label %no_exit.2
no_exit.2: ; preds = %no_exit.2, %then.0
%indvar630.ui = phi i32 [ 0, %then.0 ], [ %indvar.next631, %no_exit.2 ] ; <i32> [#uses=3]
%indvar630 = bitcast i32 %indvar630.ui to i32 ; <i32> [#uses=2]
%gep.upgrd.1 = zext i32 %indvar630.ui to i64 ; <i64> [#uses=1]
- %tmp.38 = getelementptr [700 x i32], [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp.38
+ %tmp.38 = getelementptr [700 x i32], ptr %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp.38
%inc.2 = add i32 %indvar630, 2 ; <i32> [#uses=1]
%tmp.34 = call i1 @pred( i32 %indvar630 ) ; <i1> [#uses=1]
%indvar.next631 = add i32 %indvar630.ui, 1 ; <i32> [#uses=1]
br i1 %tmp.34, label %no_exit.2, label %loopexit.2.loopexit
loopexit.2.loopexit: ; preds = %no_exit.2
- store i32 %inc.2, i32* %i_.16574
+ store i32 %inc.2, ptr %i_.16574
ret void
}
target datalayout = "e-p:32:32-n32"
target triple = "i686-apple-darwin8"
-@flags2 = external global [8193 x i8], align 32 ; <[8193 x i8]*> [#uses=1]
+@flags2 = external global [8193 x i8], align 32 ; <ptr> [#uses=1]
define void @foo(i32 %k, i32 %i.s) {
entry:
%tmp.16 = add i32 %tmp.15, %tmp. ; <i32> [#uses=2]
%k_addr.0.0 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp.16 to i64 ; <i64> [#uses=1]
- %tmp = getelementptr [8193 x i8], [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp
+ %tmp = getelementptr [8193 x i8], ptr @flags2, i32 0, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
+ store i8 0, ptr %tmp
%k_addr.0 = add i32 %k_addr.0.0, %i.s ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp sgt i32 %k_addr.0, 8192 ; <i1> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
declare i1 @pred(i32)
-define void @test([10000 x i32]* %P, i32 %STRIDE) {
+define void @test(ptr %P, i32 %STRIDE) {
; <label>:0
br label %Loop
Loop: ; preds = %Loop, %0
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[VAL:%.*]] = load i32, i32 addrspace(3)* undef, align 4
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(3) undef, align 4
; CHECK-NEXT: [[VAL1:%.*]] = add i32 undef, 12
; CHECK-NEXT: [[VAL2:%.*]] = trunc i64 undef to i32
; CHECK-NEXT: [[VAL3:%.*]] = mul i32 [[VAL1]], [[VAL2]]
; CHECK-NEXT: br i1 false, label [[BB7]], label [[BB15SPLITSPLITSPLITSPLITSPLITSPLIT]]
;
bb:
- %val = load i32, i32 addrspace(3)* undef, align 4
+ %val = load i32, ptr addrspace(3) undef, align 4
%val1 = add i32 undef, 12
%val2 = trunc i64 undef to i32
%val3 = mul i32 %val1, %val2
define void @test2() {
; CHECK-LABEL: @test2(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[VAL:%.*]] = bitcast i8* null to i32*
-; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[VAL]], align 4
-; CHECK-NEXT: [[VAL2:%.*]] = bitcast i8* null to i32*
-; CHECK-NEXT: [[VAL3:%.*]] = load i32, i32* [[VAL2]], align 4
+; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT: [[VAL3:%.*]] = load i32, ptr null, align 4
; CHECK-NEXT: br label [[BB6:%.*]]
; CHECK: bb4:
; CHECK-NEXT: [[VAL5:%.*]] = sext i32 [[VAL16:%.*]] to i64
; CHECK-NEXT: br i1 [[VAL17]], label [[BB6]], label [[BB4:%.*]]
;
bb:
- %val = bitcast i8* null to i32*
- %val1 = load i32, i32* %val, align 4
- %val2 = bitcast i8* null to i32*
- %val3 = load i32, i32* %val2, align 4
+ %val1 = load i32, ptr null, align 4
+ %val3 = load i32, ptr null, align 4
br label %bb6
bb4: ; preds = %bb12
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @_Z17test_array_boundsPiS_i(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) !dbg !8 {
+define void @_Z17test_array_boundsPiS_i(ptr nocapture %A, ptr nocapture readonly %B, i32 %Length) !dbg !8 {
entry:
%cmp9 = icmp sgt i32 %Length, 0, !dbg !32
br i1 %cmp9, label %for.body.preheader, label %for.end, !dbg !32
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
- %0 = load i32, i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv, !dbg !35
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
- %1 = load i32, i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
- %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
- store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %idxprom1, !dbg !35
+ %1 = load i32, ptr %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv, !dbg !35
+ store i32 %1, ptr %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
%exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !32
;
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @func(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) #0 {
+define void @func(ptr nocapture %A, ptr nocapture readonly %B, i32 %Length) #0 {
entry:
%cmp9 = icmp sgt i32 %Length, 0
br i1 %cmp9, label %for.body.preheader, label %for.end
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%idxprom1 = sext i32 %0 to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
- %1 = load i32, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- store i32 %1, i32* %arrayidx4, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ store i32 %1, ptr %arrayidx4, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %Length
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @_Z17test_array_boundsPiS_i(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) !dbg !8 {
+define void @_Z17test_array_boundsPiS_i(ptr nocapture %A, ptr nocapture readonly %B, i32 %Length) !dbg !8 {
entry:
%cmp9 = icmp sgt i32 %Length, 0, !dbg !32
br i1 %cmp9, label %for.body.preheader, label %for.end, !dbg !32
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
- %0 = load i32, i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv, !dbg !35
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
- %1 = load i32, i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
- %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
- store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %idxprom1, !dbg !35
+ %1 = load i32, ptr %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv, !dbg !35
+ store i32 %1, ptr %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
%exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !32
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @_Z17test_array_boundsPiS_i(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) !dbg !8 {
+define void @_Z17test_array_boundsPiS_i(ptr nocapture %A, ptr nocapture readonly %B, i32 %Length) !dbg !8 {
entry:
%cmp9 = icmp sgt i32 %Length, 0, !dbg !32
br i1 %cmp9, label %for.body.preheader, label %for.end, !dbg !32
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
- %0 = load i32, i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv, !dbg !35
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
- %1 = load i32, i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
- %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
- store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %idxprom1, !dbg !35
+ %1 = load i32, ptr %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv, !dbg !35
+ store i32 %1, ptr %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
%exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !32
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @_Z17test_array_boundsPiS_i(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) !dbg !8 {
+define void @_Z17test_array_boundsPiS_i(ptr nocapture %A, ptr nocapture readonly %B, i32 %Length) !dbg !8 {
entry:
%cmp9 = icmp sgt i32 %Length, 0, !dbg !32
br i1 %cmp9, label %for.body.preheader, label %for.end, !dbg !32, !llvm.loop !34
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
- %0 = load i32, i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv, !dbg !35
+ %0 = load i32, ptr %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
- %1 = load i32, i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
- %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
- store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %idxprom1, !dbg !35
+ %1 = load i32, ptr %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv, !dbg !35
+ store i32 %1, ptr %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
%exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !32
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @fore_aft_less(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_aft_less(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, -1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @fore_aft_eq(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_aft_eq(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, 0
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK-LABEL: fore_aft_more
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @fore_aft_more(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_aft_more(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, 1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @fore_sub_less(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_sub_less(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add72 = add nuw nsw i32 %i, -1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%add6 = add nuw nsw i32 %j, 1
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @fore_sub_eq(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_sub_eq(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add72 = add nuw nsw i32 %i, 0
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%add6 = add nuw nsw i32 %j, 1
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK-LABEL: fore_sub_more
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @fore_sub_more(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @fore_sub_more(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.outer:
%i = phi i32 [ %add7, %for.latch ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add72 = add nuw nsw i32 %i, 1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%add6 = add nuw nsw i32 %j, 1
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @sub_aft_less(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_aft_less(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, -1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @sub_aft_eq(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_aft_eq(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, 0
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK-LABEL: sub_aft_more
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @sub_aft_more(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_aft_more(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
%add7 = add nuw nsw i32 %i, 1
%add72 = add nuw nsw i32 %i, 1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond29 = icmp eq i32 %add7, %N
br i1 %exitcond29, label %cleanup, label %for.outer
; CHECK-LABEL: sub_sub_less
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @sub_sub_less(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_less(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, -1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @sub_sub_eq(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_eq(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, 0
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK-LABEL: sub_sub_more
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @sub_sub_more(i32* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_more(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, 1
- %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add72
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add72
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK-LABEL: sub_sub_less
; CHECK: %j = phi
; CHECK-NOT: %j.1 = phi
-define void @sub_sub_less([100 x i32]* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_less(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %i, i32 %j
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x i32], ptr %A, i32 %i, i32 %j
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, 1
%add73 = add nuw nsw i32 %j, -1
- %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %add72, i32 %add73
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [100 x i32], ptr %A, i32 %add72, i32 %add73
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @sub_sub_eq([100 x i32]* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_eq(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %i, i32 %j
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x i32], ptr %A, i32 %i, i32 %j
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, 1
%add73 = add nuw nsw i32 %j, 0
- %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %add72, i32 %add73
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [100 x i32], ptr %A, i32 %add72, i32 %add73
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; CHECK: %j.1 = phi
; CHECK: %j.2 = phi
; CHECK: %j.3 = phi
-define void @sub_sub_more([100 x i32]* noalias nocapture %A, i32 %N, i32* noalias nocapture readonly %B) {
+define void @sub_sub_more(ptr noalias nocapture %A, i32 %N, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp sgt i32 %N, 0
br i1 %cmp, label %for.outer, label %cleanup
for.inner:
%j = phi i32 [ %add6, %for.inner ], [ 0, %for.outer ]
%sum = phi i32 [ %add, %for.inner ], [ 0, %for.outer ]
- %arrayidx5 = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx5, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add6 = add nuw nsw i32 %j, 1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %i, i32 %j
- store i32 1, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x i32], ptr %A, i32 %i, i32 %j
+ store i32 1, ptr %arrayidx, align 4
%add72 = add nuw nsw i32 %i, 1
%add73 = add nuw nsw i32 %j, 1
- %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 %add72, i32 %add73
- store i32 %add, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [100 x i32], ptr %A, i32 %add72, i32 %add73
+ store i32 %add, ptr %arrayidx8, align 4
%exitcond = icmp eq i32 %add6, %N
br i1 %exitcond, label %for.latch, label %for.inner
; A[i+1][j][k-1] = 0;
; }
-define void @sub_sub_less_3d([100 x [100 x i32]]* noalias %A) {
+define void @sub_sub_less_3d(ptr noalias %A) {
entry:
br label %for.i
for.k:
%k = phi i32 [ 0, %for.j ], [ %inc.k, %for.k ]
- %arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i32 %i, i32 %j, i32 %k
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]], ptr %A, i32 %i, i32 %j, i32 %k
+ store i32 0, ptr %arrayidx, align 4
%add.i = add nsw i32 %i, 1
%sub.k = add nsw i32 %k, -1
- %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i32 %add.i, i32 %j, i32 %sub.k
- store i32 0, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], ptr %A, i32 %add.i, i32 %j, i32 %sub.k
+ store i32 0, ptr %arrayidx2, align 4
%inc.k = add nsw i32 %k, 1
%cmp.k = icmp slt i32 %inc.k, 100
br i1 %cmp.k, label %for.k, label %for.j.latch
; CHECK: %k = phi
; CHECK-NOT: %k.1 = phi
-define void @sub_sub_outer_scalar([100 x i32]* %A) {
+define void @sub_sub_outer_scalar(ptr %A) {
entry:
br label %for.i
for.k:
%k = phi i64 [ 0, %for.j ], [ %inc.k, %for.k ]
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %j
- %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i64 0, i64 %k
- %0 = load i32, i32* %arrayidx7, align 4
+ %arrayidx = getelementptr inbounds [100 x i32], ptr %A, i64 %j
+ %arrayidx7 = getelementptr inbounds [100 x i32], ptr %arrayidx, i64 0, i64 %k
+ %0 = load i32, ptr %arrayidx7, align 4
%sub.j = sub nsw i64 %j, 1
- %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub.j
- %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx8, i64 0, i64 %k
- store i32 %0, i32* %arrayidx9, align 4
+ %arrayidx8 = getelementptr inbounds [100 x i32], ptr %A, i64 %sub.j
+ %arrayidx9 = getelementptr inbounds [100 x i32], ptr %arrayidx8, i64 0, i64 %k
+ store i32 %0, ptr %arrayidx9, align 4
%inc.k = add nsw i64 %k, 1
%cmp.k = icmp slt i64 %inc.k, 100
br i1 %cmp.k, label %for.k, label %for.j.latch
; CHECK-LABEL: disabled1
; Tests for(i) { sum = A[i]; for(j) sum += B[j]; A[i+1] = sum; }
; A[i] to A[i+1] dependency should block unrollandjam
-define void @disabled1(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled1(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i.029 = phi i32 [ %add10, %for.latch ], [ 0, %for.preheader ]
; CHECK: %j.026 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.outer:
%i.029 = phi i32 [ %add10, %for.latch ], [ 0, %for.preheader ]
%b.028 = phi i32 [ %inc8, %for.latch ], [ 1, %for.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.029
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.029
+ %0 = load i32, ptr %arrayidx, align 4
br label %for.inner
for.inner:
%j.026 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1.025 = phi i32 [ %0, %for.outer ], [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %B, i32 %j.026
- %1 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %B, i32 %j.026
+ %1 = load i32, ptr %arrayidx6, align 4
%add = add i32 %1, %sum1.025
%inc = add nuw i32 %j.026, 1
%exitcond = icmp eq i32 %inc, %J
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %b.028
- store i32 %add, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %A, i32 %b.028
+ store i32 %add, ptr %arrayidx7, align 4
%inc8 = add nuw nsw i32 %b.028, 1
%add10 = add nuw nsw i32 %i.029, 1
%exitcond30 = icmp eq i32 %add10, %I
; CHECK-LABEL: disabled2
; Tests an incompatible block layout (for.outer jumps past for.inner)
; FIXME: Make this work
-define void @disabled2(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled2(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i.032 = phi i32 [ %add13, %for.latch ], [ 0, %for.preheader ]
; CHECK: %j.030 = phi i32 [ %inc, %for.inner ], [ 0, %for.inner.preheader ]
entry:
for.outer:
%i.032 = phi i32 [ %add13, %for.latch ], [ 0, %for.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.032
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %i.032
+ %0 = load i32, ptr %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.latch, label %for.inner
for.inner:
%j.030 = phi i32 [ %inc, %for.inner ], [ 0, %for.outer ]
%sum1.029 = phi i32 [ %sum1.1, %for.inner ], [ 0, %for.outer ]
- %arrayidx6 = getelementptr inbounds i32, i32* %B, i32 %j.030
- %1 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %B, i32 %j.030
+ %1 = load i32, ptr %arrayidx6, align 4
%tobool7 = icmp eq i32 %1, 0
%sub = add i32 %sum1.029, 10
%add = sub i32 %sub, %1
for.latch:
%sum1.1.lcssa = phi i32 [ 0, %for.outer ], [ %sum1.1, %for.inner ]
- %arrayidx11 = getelementptr inbounds i32, i32* %A, i32 %i.032
- store i32 %sum1.1.lcssa, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %A, i32 %i.032
+ store i32 %sum1.1.lcssa, ptr %arrayidx11, align 4
%add13 = add nuw i32 %i.032, 1
%exitcond33 = icmp eq i32 %add13, %I
br i1 %exitcond33, label %for.end14, label %for.outer
; CHECK-LABEL: disabled3
; Tests loop carry dependencies in an array S
-define void @disabled3(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled3(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i.029 = phi i32 [ 0, %for.preheader ], [ %add12, %for.latch ]
; CHECK: %j.027 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
br i1 %cmp, label %return, label %if.end
if.end:
- %0 = bitcast [4 x i32]* %S to i8*
%cmp128 = icmp eq i32 %I, 0
br i1 %cmp128, label %for.cond.cleanup, label %for.preheader
for.preheader:
- %arrayidx9 = getelementptr inbounds [4 x i32], [4 x i32]* %S, i32 0, i32 0
br label %for.outer
for.cond.cleanup:
for.inner:
%j.027 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j.027
- %l2 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j.027
+ %l2 = load i32, ptr %arrayidx, align 4
%add = add i32 %j.027, %i.029
%rem = urem i32 %add, %J
- %arrayidx6 = getelementptr inbounds i32, i32* %B, i32 %rem
- %l3 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %B, i32 %rem
+ %l3 = load i32, ptr %arrayidx6, align 4
%mul = mul i32 %l3, %l2
%rem7 = urem i32 %j.027, 3
- %arrayidx8 = getelementptr inbounds [4 x i32], [4 x i32]* %S, i32 0, i32 %rem7
- store i32 %mul, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds [4 x i32], ptr %S, i32 0, i32 %rem7
+ store i32 %mul, ptr %arrayidx8, align 4
%inc = add nuw i32 %j.027, 1
%exitcond = icmp eq i32 %inc, %J
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- %l1 = load i32, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32, i32* %A, i32 %i.029
- store i32 %l1, i32* %arrayidx10, align 4
+ %l1 = load i32, ptr %S, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %A, i32 %i.029
+ store i32 %l1, ptr %arrayidx10, align 4
%add12 = add nuw i32 %i.029, 1
%exitcond31 = icmp eq i32 %add12, %I
br i1 %exitcond31, label %for.cond.cleanup, label %for.outer
; CHECK-LABEL: disabled4
; Inner looop induction variable is not consistent
; ie for(i = 0..n) for (j = 0..i) sum+=B[j]
-define void @disabled4(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled4(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %indvars.iv = phi i32 [ %indvars.iv.next, %for.latch ], [ 1, %for.preheader ]
; CHECK: %j.021 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j.021 = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1.020 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j.021
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j.021
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1.020
%inc = add nuw i32 %j.021, 1
%exitcond = icmp eq i32 %inc, %indvars.iv
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
- store i32 %add, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %indvars.iv
+ store i32 %add, ptr %arrayidx6, align 4
%indvars.iv.next = add nuw i32 %indvars.iv, 1
%exitcond24 = icmp eq i32 %indvars.iv.next, %I
br i1 %exitcond24, label %for.end9, label %for.outer
; CHECK: %0 = phi i32 [ %f.promoted10, %entry ], [ 2, %for.latch ]
; CHECK: %1 = phi i32 [ %0, %for.outer ], [ 2, %for.inner ]
entry:
- %f.promoted10 = load i32, i32* @f, align 4
+ %f.promoted10 = load i32, ptr @f, align 4
br label %for.outer
for.outer:
; CHECK-LABEL: disabled6
; There is a dependency in here, between @d and %0 (=@f)
@d6 = hidden global i16 5, align 2
-@f6 = hidden global i16* @d6, align 4
+@f6 = hidden global ptr @d6, align 4
define i32 @disabled6() #0 {
; CHECK: %inc8.sink14.i = phi i16 [ 1, %entry ], [ %inc8.i, %for.cond.cleanup.i ]
; CHECK: %c.013.i = phi i32 [ 0, %for.body.i ], [ %inc.i, %for.body6.i ]
entry:
- store i16 1, i16* @d6, align 2
- %0 = load i16*, i16** @f6, align 4
+ store i16 1, ptr @d6, align 2
+ %0 = load ptr, ptr @f6, align 4
br label %for.body.i
for.body.i:
%inc8.sink14.i = phi i16 [ 1, %entry ], [ %inc8.i, %for.cond.cleanup.i ]
- %1 = load i16, i16* %0, align 2
+ %1 = load i16, ptr %0, align 2
br label %for.body6.i
for.cond.cleanup.i:
%inc8.i = add nuw nsw i16 %inc8.sink14.i, 1
- store i16 %inc8.i, i16* @d6, align 2
+ store i16 %inc8.i, ptr @d6, align 2
%cmp.i = icmp ult i16 %inc8.i, 6
br i1 %cmp.i, label %for.body.i, label %test.exit
; CHECK-LABEL: disabled7
; Has negative output dependency
-define void @disabled7(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled7(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i.028 = phi i32 [ %add11, %for.cond3.for.cond.cleanup5_crit_edge ], [ 0, %for.body.preheader ]
; CHECK: %j.026 = phi i32 [ 0, %for.body ], [ %add9, %for.body6 ]
entry:
for.body:
%i.028 = phi i32 [ %add11, %for.cond3.for.cond.cleanup5_crit_edge ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.028
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.028
+ store i32 0, ptr %arrayidx, align 4
%sub = add i32 %i.028, -1
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %sub
- store i32 2, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i32 %sub
+ store i32 2, ptr %arrayidx2, align 4
br label %for.body6
for.cond3.for.cond.cleanup5_crit_edge:
- store i32 %add, i32* %arrayidx, align 4
+ store i32 %add, ptr %arrayidx, align 4
%add11 = add nuw i32 %i.028, 1
%exitcond29 = icmp eq i32 %add11, %I
br i1 %exitcond29, label %for.end12, label %for.body
for.body6:
%0 = phi i32 [ 0, %for.body ], [ %add, %for.body6 ]
%j.026 = phi i32 [ 0, %for.body ], [ %add9, %for.body6 ]
- %arrayidx7 = getelementptr inbounds i32, i32* %B, i32 %j.026
- %1 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %B, i32 %j.026
+ %1 = load i32, ptr %arrayidx7, align 4
%add = add i32 %1, %0
%add9 = add nuw i32 %j.026, 1
%exitcond = icmp eq i32 %add9, %J
; CHECK-LABEL: disabled8
; Same as above with an extra outer loop nest
-define void @disabled8(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disabled8(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i.036 = phi i32 [ %add15, %for.latch ], [ 0, %for.body ]
; CHECK: %j.034 = phi i32 [ 0, %for.outer ], [ %add13, %for.inner ]
entry:
for.outer:
%i.036 = phi i32 [ %add15, %for.latch ], [ 0, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.036
- store i32 0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.036
+ store i32 0, ptr %arrayidx, align 4
%sub = add i32 %i.036, -1
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %sub
- store i32 2, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %sub
+ store i32 2, ptr %arrayidx6, align 4
br label %for.inner
for.latch:
- store i32 %add, i32* %arrayidx, align 4
+ store i32 %add, ptr %arrayidx, align 4
%add15 = add nuw i32 %i.036, 1
%exitcond38 = icmp eq i32 %add15, %I
br i1 %exitcond38, label %for.cond.cleanup4, label %for.outer
for.inner:
%0 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
%j.034 = phi i32 [ 0, %for.outer ], [ %add13, %for.inner ]
- %arrayidx11 = getelementptr inbounds i32, i32* %B, i32 %j.034
- %1 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %B, i32 %j.034
+ %1 = load i32, ptr %arrayidx11, align 4
%add = add i32 %1, %0
%add13 = add nuw i32 %j.034, 1
%exitcond = icmp eq i32 %add13, %J
; CHECK-LABEL: disabled9
; Can't prove alias between A and B
-define void @disabled9(i32 %I, i32 %J, i32* nocapture %A, i32* nocapture readonly %B) #0 {
+define void @disabled9(i32 %I, i32 %J, ptr nocapture %A, ptr nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable10
; Simple call
declare void @f10(i32, i32) #0
-define void @disable10(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable10(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable11
; volatile
-define void @disable11(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable11(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load volatile i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load volatile i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable12
; Multiple aft blocks
-define void @disable12(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable12(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch3 ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%cmpl = icmp eq i32 %add.lcssa, 10
br i1 %cmpl, label %for.latch2, label %for.latch3
; CHECK-LABEL: disable13
; Two subloops
-define void @disable13(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable13(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
; CHECK: %j2 = phi i32 [ %inc2, %for.inner2 ], [ 0, %for.inner2.preheader ]
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.inner2:
%j2 = phi i32 [ 0, %for.inner ], [ %inc2, %for.inner2 ]
%sum12 = phi i32 [ 0, %for.inner ], [ %add2, %for.inner2 ]
- %arrayidx2 = getelementptr inbounds i32, i32* %B, i32 %j2
- %l0 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %B, i32 %j2
+ %l0 = load i32, ptr %arrayidx2, align 4
%add2 = add i32 %l0, %sum12
%inc2 = add nuw i32 %j2, 1
%exitcond2 = icmp eq i32 %inc2, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner2 ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable14
; Multiple exits blocks
-define void @disable14(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable14(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ %inc, %for.inner ], [ 0, %for.inner.preheader ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable15
; Latch != exit
-define void @disable15(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable15(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ %inc, %for.inner ], [ 0, %for.inner.preheader ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
br label %for.outer
for.end.loopexit:
; CHECK-LABEL: disable16
; Cannot move other before inner loop
-define void @disable16(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @disable16(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %add8, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
- %loadarr = getelementptr inbounds i32, i32* %A, i32 %i
- %load = load i32, i32* %arrayidx6, align 4
+ %loadarr = getelementptr inbounds i32, ptr %A, i32 %i
+ %load = load i32, ptr %arrayidx6, align 4
%other = add i32 %otherphi, %load
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: disable_nonforced
; CHECK: load
; CHECK-NOT: load
-define void @disable_nonforced(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @disable_nonforced(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp ne i32 %J, 0
%cmp122 = icmp ne i32 %I, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !0
; CHECK: load
; CHECK-NOT: load
; CHECK: br i1
-define void @disable_nonforced_enable(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @disable_nonforced_enable(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp ne i32 %J, 0
%cmp122 = icmp ne i32 %I, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !0
; CHECK: load
; CHECK-NOT: load
; CHECK: br i1
-define void @disable_nonforced_enable(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @disable_nonforced_enable(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp ne i32 %J, 0
%cmp122 = icmp ne i32 %I, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !0
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-define void @followup(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @followup(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
entry:
%cmp = icmp ne i32 %J, 0
%cmp122 = icmp ne i32 %I, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !0
%k.01 = phi i32 [ 0, %for.cond23.preheader ], [ %inc43, %for.body25 ]
%idxprom26 = zext i32 %i.13 to i64
%idxprom28 = zext i32 %j.12 to i64
- %arrayidx29 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %C, i64 0, i64 %idxprom26, i64 %idxprom28
- %0 = load i32, i32* %arrayidx29, align 4
+ %arrayidx29 = getelementptr inbounds [8 x [8 x i32]], ptr %C, i64 0, i64 %idxprom26, i64 %idxprom28
+ %0 = load i32, ptr %arrayidx29, align 4
%idxprom30 = zext i32 %i.13 to i64
%idxprom32 = zext i32 %k.01 to i64
- %arrayidx33 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %A, i64 0, i64 %idxprom30, i64 %idxprom32
- %1 = load i32, i32* %arrayidx33, align 4
+ %arrayidx33 = getelementptr inbounds [8 x [8 x i32]], ptr %A, i64 0, i64 %idxprom30, i64 %idxprom32
+ %1 = load i32, ptr %arrayidx33, align 4
%idxprom34 = zext i32 %k.01 to i64
%idxprom36 = zext i32 %j.12 to i64
- %arrayidx37 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %B, i64 0, i64 %idxprom34, i64 %idxprom36
- %2 = load i32, i32* %arrayidx37, align 4
+ %arrayidx37 = getelementptr inbounds [8 x [8 x i32]], ptr %B, i64 0, i64 %idxprom34, i64 %idxprom36
+ %2 = load i32, ptr %arrayidx37, align 4
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %0, %mul
%idxprom38 = zext i32 %i.13 to i64
%idxprom40 = zext i32 %j.12 to i64
- %arrayidx41 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %C, i64 0, i64 %idxprom38, i64 %idxprom40
- store i32 %add, i32* %arrayidx41, align 4
+ %arrayidx41 = getelementptr inbounds [8 x [8 x i32]], ptr %C, i64 0, i64 %idxprom38, i64 %idxprom40
+ store i32 %add, ptr %arrayidx41, align 4
%inc43 = add nuw nsw i32 %k.01, 1
%cmp24 = icmp ult i32 %k.01, 7
br i1 %cmp24, label %for.body25, label %for.inc45
define void @h() {
bb:
- store i32 4, i32* @e, align 4
- %i15 = load i16, i16* @b, align 2
+ store i32 4, ptr @e, align 4
+ %i15 = load i16, ptr @b, align 2
%i17 = icmp slt i16 %i15, 1
br label %bb8
br i1 %i17, label %bb46.preheader, label %bb43
bb46.preheader: ; preds = %bb24
- store i16 %storemerge312, i16* @f, align 2
+ store i16 %storemerge312, ptr @f, align 2
br label %bb46
bb43: ; preds = %bb24
bb47: ; preds = %bb43
%i49 = add nsw i32 %storemerge15, -1
- store i32 %i49, i32* @e, align 4
+ store i32 %i49, ptr @e, align 4
%i7.not = icmp eq i32 %i49, 0
br i1 %i7.not, label %bb50, label %bb8
bb50: ; preds = %bb47
- store i16 %i45, i16* @f, align 2
+ store i16 %i45, ptr @f, align 2
ret void
}
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-define void @test1(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test1(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
entry:
%cmp = icmp ne i32 %J, 0
%cmpJ = icmp ne i32 %I, 0
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !5
%add = add i32 %0, %sum
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4, !tbaa !5
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; The explicit metadata here should force this to be unroll and jammed 4 times (hence the %.pre60.3)
; CHECK: %.pre = phi i8 [ %.pre60.3, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %.pre.pre, %for.cond1.preheader.us.preheader.new ]
; CHECK: %indvars.iv.3 = phi i64 [ 0, %for.cond1.preheader.us ], [ %indvars.iv.next.3, %for.body4.us ]
-define void @function(i8* noalias nocapture %dst, i32 %dst_stride, i8* noalias nocapture readonly %src, i32 %src_stride, i32 %A, i32 %B, i32 %C, i32 %D, i32 %width, i32 %height) {
+define void @function(ptr noalias nocapture %dst, i32 %dst_stride, ptr noalias nocapture readonly %src, i32 %src_stride, i32 %A, i32 %B, i32 %C, i32 %D, i32 %width, i32 %height) {
entry:
%idxprom = sext i32 %src_stride to i64
%cmp52 = icmp sgt i32 %height, 0
br i1 %cmp249, label %for.cond1.preheader.us.preheader, label %for.cond.cleanup
for.cond1.preheader.us.preheader: ; preds = %for.cond1.preheader.lr.ph
- %.pre.pre = load i8, i8* %src, align 1
+ %.pre.pre = load i8, ptr %src, align 1
%wide.trip.count = zext i32 %width to i64
br label %for.cond1.preheader.us
for.cond1.preheader.us: ; preds = %for.cond1.for.cond.cleanup3_crit_edge.us, %for.cond1.preheader.us.preheader
%.pre = phi i8 [ %.pre60, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %.pre.pre, %for.cond1.preheader.us.preheader ]
- %srcp.056.us.pn = phi i8* [ %srcp.056.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %src, %for.cond1.preheader.us.preheader ]
+ %srcp.056.us.pn = phi ptr [ %srcp.056.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %src, %for.cond1.preheader.us.preheader ]
%y.055.us = phi i32 [ %inc30.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ 0, %for.cond1.preheader.us.preheader ]
- %dst.addr.054.us = phi i8* [ %add.ptr.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %dst, %for.cond1.preheader.us.preheader ]
- %srcp.056.us = getelementptr inbounds i8, i8* %srcp.056.us.pn, i64 %idxprom
- %.pre60 = load i8, i8* %srcp.056.us, align 1
+ %dst.addr.054.us = phi ptr [ %add.ptr.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %dst, %for.cond1.preheader.us.preheader ]
+ %srcp.056.us = getelementptr inbounds i8, ptr %srcp.056.us.pn, i64 %idxprom
+ %.pre60 = load i8, ptr %srcp.056.us, align 1
br label %for.body4.us
for.body4.us: ; preds = %for.body4.us, %for.cond1.preheader.us
%conv.us = zext i8 %1 to i32
%mul.us = mul nsw i32 %conv.us, %A
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx8.us = getelementptr inbounds i8, i8* %srcp.056.us.pn, i64 %indvars.iv.next
- %2 = load i8, i8* %arrayidx8.us, align 1
+ %arrayidx8.us = getelementptr inbounds i8, ptr %srcp.056.us.pn, i64 %indvars.iv.next
+ %2 = load i8, ptr %arrayidx8.us, align 1
%conv9.us = zext i8 %2 to i32
%mul10.us = mul nsw i32 %conv9.us, %B
%conv14.us = zext i8 %0 to i32
%mul15.us = mul nsw i32 %conv14.us, %C
- %arrayidx19.us = getelementptr inbounds i8, i8* %srcp.056.us, i64 %indvars.iv.next
- %3 = load i8, i8* %arrayidx19.us, align 1
+ %arrayidx19.us = getelementptr inbounds i8, ptr %srcp.056.us, i64 %indvars.iv.next
+ %3 = load i8, ptr %arrayidx19.us, align 1
%conv20.us = zext i8 %3 to i32
%mul21.us = mul nsw i32 %conv20.us, %D
%add11.us = add i32 %mul.us, 32
%add23.us = add i32 %add22.us, %mul21.us
%4 = lshr i32 %add23.us, 6
%conv24.us = trunc i32 %4 to i8
- %arrayidx26.us = getelementptr inbounds i8, i8* %dst.addr.054.us, i64 %indvars.iv
- store i8 %conv24.us, i8* %arrayidx26.us, align 1
+ %arrayidx26.us = getelementptr inbounds i8, ptr %dst.addr.054.us, i64 %indvars.iv
+ store i8 %conv24.us, ptr %arrayidx26.us, align 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond1.for.cond.cleanup3_crit_edge.us, label %for.body4.us
for.cond1.for.cond.cleanup3_crit_edge.us: ; preds = %for.body4.us
- %add.ptr.us = getelementptr inbounds i8, i8* %dst.addr.054.us, i64 %idx.ext
+ %add.ptr.us = getelementptr inbounds i8, ptr %dst.addr.054.us, i64 %idx.ext
%inc30.us = add nuw nsw i32 %y.055.us, 1
%exitcond58 = icmp eq i32 %inc30.us, %height
br i1 %exitcond58, label %for.cond.cleanup, label %for.cond1.preheader.us, !llvm.loop !5
; the count is left to thresholds. In this case 2 (hence %.pre60.1).
; CHECK: %.pre = phi i8 [ %.pre60.1, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %.pre.pre, %for.cond1.preheader.us.preheader.new ]
; CHECK: %indvars.iv.1 = phi i64 [ 0, %for.cond1.preheader.us ], [ %indvars.iv.next.1, %for.body4.us ]
-define void @function2(i8* noalias nocapture %dst, i32 %dst_stride, i8* noalias nocapture readonly %src, i32 %src_stride, i32 %A, i32 %B, i32 %C, i32 %D, i32 %width, i32 %height) {
+define void @function2(ptr noalias nocapture %dst, i32 %dst_stride, ptr noalias nocapture readonly %src, i32 %src_stride, i32 %A, i32 %B, i32 %C, i32 %D, i32 %width, i32 %height) {
entry:
%idxprom = sext i32 %src_stride to i64
%cmp52 = icmp sgt i32 %height, 0
br i1 %cmp249, label %for.cond1.preheader.us.preheader, label %for.cond.cleanup
for.cond1.preheader.us.preheader: ; preds = %for.cond1.preheader.lr.ph
- %.pre.pre = load i8, i8* %src, align 1
+ %.pre.pre = load i8, ptr %src, align 1
%wide.trip.count = zext i32 %width to i64
br label %for.cond1.preheader.us
for.cond1.preheader.us: ; preds = %for.cond1.for.cond.cleanup3_crit_edge.us, %for.cond1.preheader.us.preheader
%.pre = phi i8 [ %.pre60, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %.pre.pre, %for.cond1.preheader.us.preheader ]
- %srcp.056.us.pn = phi i8* [ %srcp.056.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %src, %for.cond1.preheader.us.preheader ]
+ %srcp.056.us.pn = phi ptr [ %srcp.056.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %src, %for.cond1.preheader.us.preheader ]
%y.055.us = phi i32 [ %inc30.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ 0, %for.cond1.preheader.us.preheader ]
- %dst.addr.054.us = phi i8* [ %add.ptr.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %dst, %for.cond1.preheader.us.preheader ]
- %srcp.056.us = getelementptr inbounds i8, i8* %srcp.056.us.pn, i64 %idxprom
- %.pre60 = load i8, i8* %srcp.056.us, align 1
+ %dst.addr.054.us = phi ptr [ %add.ptr.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %dst, %for.cond1.preheader.us.preheader ]
+ %srcp.056.us = getelementptr inbounds i8, ptr %srcp.056.us.pn, i64 %idxprom
+ %.pre60 = load i8, ptr %srcp.056.us, align 1
br label %for.body4.us
for.body4.us: ; preds = %for.body4.us, %for.cond1.preheader.us
%conv.us = zext i8 %1 to i32
%mul.us = mul nsw i32 %conv.us, %A
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx8.us = getelementptr inbounds i8, i8* %srcp.056.us.pn, i64 %indvars.iv.next
- %2 = load i8, i8* %arrayidx8.us, align 1
+ %arrayidx8.us = getelementptr inbounds i8, ptr %srcp.056.us.pn, i64 %indvars.iv.next
+ %2 = load i8, ptr %arrayidx8.us, align 1
%conv9.us = zext i8 %2 to i32
%mul10.us = mul nsw i32 %conv9.us, %B
%conv14.us = zext i8 %0 to i32
%mul15.us = mul nsw i32 %conv14.us, %C
- %arrayidx19.us = getelementptr inbounds i8, i8* %srcp.056.us, i64 %indvars.iv.next
- %3 = load i8, i8* %arrayidx19.us, align 1
+ %arrayidx19.us = getelementptr inbounds i8, ptr %srcp.056.us, i64 %indvars.iv.next
+ %3 = load i8, ptr %arrayidx19.us, align 1
%conv20.us = zext i8 %3 to i32
%mul21.us = mul nsw i32 %conv20.us, %D
%add11.us = add i32 %mul.us, 32
%add23.us = add i32 %add22.us, %mul21.us
%4 = lshr i32 %add23.us, 6
%conv24.us = trunc i32 %4 to i8
- %arrayidx26.us = getelementptr inbounds i8, i8* %dst.addr.054.us, i64 %indvars.iv
- store i8 %conv24.us, i8* %arrayidx26.us, align 1
+ %arrayidx26.us = getelementptr inbounds i8, ptr %dst.addr.054.us, i64 %indvars.iv
+ store i8 %conv24.us, ptr %arrayidx26.us, align 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond1.for.cond.cleanup3_crit_edge.us, label %for.body4.us
for.cond1.for.cond.cleanup3_crit_edge.us: ; preds = %for.body4.us
- %add.ptr.us = getelementptr inbounds i8, i8* %dst.addr.054.us, i64 %idx.ext
+ %add.ptr.us = getelementptr inbounds i8, ptr %dst.addr.054.us, i64 %idx.ext
%inc30.us = add nuw nsw i32 %y.055.us, 1
%exitcond58 = icmp eq i32 %inc30.us, %height
br i1 %exitcond58, label %for.cond.cleanup, label %for.cond1.preheader.us, !llvm.loop !7
; CHECK-LABEL: test1
; Basic check that these loops are by default UnJ'd
-define void @test1(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @test1(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us.{{[1-9]*}}, %for.latch ], [ 0, %for.outer.preheader.new ]
; CHECK-LOWTHRES: %i.us = phi i32 [ %add8.us, %for.latch ], [ 0, %for.outer.preheader ]
entry:
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; CHECK-LABEL: nounroll_and_jam
; #pragma nounroll_and_jam
-define void @nounroll_and_jam(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @nounroll_and_jam(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us, %for.latch ], [ 0, %for.outer.preheader ]
entry:
%cmp = icmp ne i32 %J, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !1
; CHECK-LABEL: unroll_and_jam_count
; #pragma unroll_and_jam(8)
-define void @unroll_and_jam_count(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @unroll_and_jam_count(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us.7, %for.latch ], [ 0, %for.outer.preheader.new ]
entry:
%cmp = icmp ne i32 %J, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !3
; CHECK-LABEL: unroll_and_jam
; #pragma unroll_and_jam
-define void @unroll_and_jam(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @unroll_and_jam(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us.{{[1-9]*}}, %for.latch ], [ 0, %for.outer.preheader.new ]
; CHECK-LOWTHRES: %i.us = phi i32 [ %add8.us.{{[1-9]*}}, %for.latch ], [ 0, %for.outer.preheader.new ]
entry:
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !5
; CHECK-LABEL: nounroll
; #pragma nounroll (which we take to mean disable unroll and jam too)
-define void @nounroll(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @nounroll(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us, %for.latch ], [ 0, %for.outer.preheader ]
entry:
%cmp = icmp ne i32 %J, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !7
; CHECK-LABEL: unroll
; #pragma unroll (which we take to mean disable unroll and jam)
-define void @unroll(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @unroll(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us, %for.latch ], [ 0, %for.outer.preheader ]
entry:
%cmp = icmp ne i32 %J, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !9
; CHECK-LABEL: nounroll_plus_unroll_and_jam
; #pragma clang loop nounroll, unroll_and_jam (which we take to mean do unroll_and_jam)
-define void @nounroll_plus_unroll_and_jam(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) {
+define void @nounroll_plus_unroll_and_jam(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
; CHECK: %i.us = phi i32 [ %add8.us.{{[1-9]*}}, %for.latch ], [ 0, %for.outer.preheader.new ]
entry:
%cmp = icmp ne i32 %J, 0
for.inner:
%j.us = phi i32 [ 0, %for.outer ], [ %inc.us, %for.inner ]
%sum1.us = phi i32 [ 0, %for.outer ], [ %add.us, %for.inner ]
- %arrayidx.us = getelementptr inbounds i32, i32* %B, i32 %j.us
- %0 = load i32, i32* %arrayidx.us, align 4
+ %arrayidx.us = getelementptr inbounds i32, ptr %B, i32 %j.us
+ %0 = load i32, ptr %arrayidx.us, align 4
%add.us = add i32 %0, %sum1.us
%inc.us = add nuw i32 %j.us, 1
%exitcond = icmp eq i32 %inc.us, %J
for.latch:
%add.us.lcssa = phi i32 [ %add.us, %for.inner ]
- %arrayidx6.us = getelementptr inbounds i32, i32* %A, i32 %i.us
- store i32 %add.us.lcssa, i32* %arrayidx6.us, align 4
+ %arrayidx6.us = getelementptr inbounds i32, ptr %A, i32 %i.us
+ store i32 %add.us.lcssa, ptr %arrayidx6.us, align 4
%add8.us = add nuw i32 %i.us, 1
%exitcond25 = icmp eq i32 %add8.us, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer, !llvm.loop !11
; CHECK-LABEL: unprof1
; Multiple inner loop blocks
-define void @unprof1(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @unprof1(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %addinc, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner2 ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner2 ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner2 ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
br label %for.inner2
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner2 ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%addinc = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %addinc, %I
br i1 %exitcond25, label %for.loopexit, label %for.outer
; CHECK-LABEL: unprof2
; Constant inner loop count
-define void @unprof2(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @unprof2(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %addinc, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, 10
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%addinc = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %addinc, %I
br i1 %exitcond25, label %for.loopexit, label %for.outer
; CHECK-LABEL: unprof3
; Complex inner loop
-define void @unprof3(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @unprof3(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %addinc, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%add0 = add i32 %0, %sum1
%add1 = add i32 %0, %sum1
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%addinc = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %addinc, %I
br i1 %exitcond25, label %for.loopexit, label %for.outer
; CHECK-LABEL: unprof4
; No loop invariant loads
-define void @unprof4(i32 %I, i32 %J, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @unprof4(i32 %I, i32 %J, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK: %i = phi i32 [ %addinc, %for.latch ], [ 0, %for.outer.preheader ]
; CHECK: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
entry:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum1 = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
%j2 = add i32 %j, %i
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j2
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j2
+ %0 = load i32, ptr %arrayidx, align 4
%add = add i32 %0, %sum1
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %J
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4
%addinc = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %addinc, %I
br i1 %exitcond25, label %for.loopexit, label %for.outer
@a = common dso_local local_unnamed_addr global i32 0, align 4
@b = common dso_local local_unnamed_addr global i8 0, align 1
@e = common dso_local local_unnamed_addr global i64 0, align 8
-@c = common dso_local local_unnamed_addr global i32* null, align 8
+@c = common dso_local local_unnamed_addr global ptr null, align 8
@g = common dso_local local_unnamed_addr global i64 0, align 8
@f = common dso_local local_unnamed_addr global i32 0, align 4
; CHECK: for.end27
define dso_local void @test1(i32 %i) {
entry:
- %0 = load i32, i32* @a, align 4, !tbaa !1
+ %0 = load i32, ptr @a, align 4, !tbaa !1
%tobool40 = icmp eq i32 %0, 0
br i1 %tobool40, label %for.end27, label %for.cond1.preheader.lr.ph
for.cond1.preheader.lr.ph: ; preds = %entry
- %1 = load i32*, i32** @c, align 8, !tbaa !5
+ %1 = load ptr, ptr @c, align 8, !tbaa !5
br label %for.cond1.preheader
for.cond1.preheader: ; preds = %for.cond1.preheader.lr.ph, %for.cond13.preheader
%tobool21.8.9 = icmp ne i32 %xor.7.9, 0
%lor.ext.8.9 = zext i1 %tobool21.8.9 to i32
%xor.8.9 = xor i32 %xor.7.9, %lor.ext.8.9
- store i32 10, i32* @f, align 4, !tbaa !1
- %2 = load i32, i32* @a, align 4, !tbaa !1
+ store i32 10, ptr @f, align 4, !tbaa !1
+ %2 = load i32, ptr @a, align 4, !tbaa !1
%tobool = icmp eq i32 %2, 0
br i1 %tobool, label %for.cond.for.end27_crit_edge, label %for.cond1.preheader
for.cond4.preheader: ; preds = %for.cond1.preheader, %for.cond4.preheader
%j.035 = phi i32 [ 9, %for.cond1.preheader ], [ %dec11, %for.cond4.preheader ]
- %3 = load i8, i8* @b, align 1, !tbaa !7
+ %3 = load i8, ptr @b, align 1, !tbaa !7
%conv = zext i8 %3 to i32
%cmp = icmp sgt i32 %i.addr.041, %conv
%conv7 = zext i1 %cmp to i32
- store i32 %conv7, i32* %1, align 4, !tbaa !1
- %4 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7, ptr %1, align 4, !tbaa !1
+ %4 = load i8, ptr @b, align 1, !tbaa !7
%conv.1 = zext i8 %4 to i32
%cmp.1 = icmp sgt i32 %i.addr.041, %conv.1
%conv7.1 = zext i1 %cmp.1 to i32
- store i32 %conv7.1, i32* %1, align 4, !tbaa !1
- %5 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.1, ptr %1, align 4, !tbaa !1
+ %5 = load i8, ptr @b, align 1, !tbaa !7
%conv.2 = zext i8 %5 to i32
%cmp.2 = icmp sgt i32 %i.addr.041, %conv.2
%conv7.2 = zext i1 %cmp.2 to i32
- store i32 %conv7.2, i32* %1, align 4, !tbaa !1
- %6 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.2, ptr %1, align 4, !tbaa !1
+ %6 = load i8, ptr @b, align 1, !tbaa !7
%conv.3 = zext i8 %6 to i32
%cmp.3 = icmp sgt i32 %i.addr.041, %conv.3
%conv7.3 = zext i1 %cmp.3 to i32
- store i32 %conv7.3, i32* %1, align 4, !tbaa !1
- %7 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.3, ptr %1, align 4, !tbaa !1
+ %7 = load i8, ptr @b, align 1, !tbaa !7
%conv.4 = zext i8 %7 to i32
%cmp.4 = icmp sgt i32 %i.addr.041, %conv.4
%conv7.4 = zext i1 %cmp.4 to i32
- store i32 %conv7.4, i32* %1, align 4, !tbaa !1
- %8 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.4, ptr %1, align 4, !tbaa !1
+ %8 = load i8, ptr @b, align 1, !tbaa !7
%conv.5 = zext i8 %8 to i32
%cmp.5 = icmp sgt i32 %i.addr.041, %conv.5
%conv7.5 = zext i1 %cmp.5 to i32
- store i32 %conv7.5, i32* %1, align 4, !tbaa !1
- %9 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.5, ptr %1, align 4, !tbaa !1
+ %9 = load i8, ptr @b, align 1, !tbaa !7
%conv.6 = zext i8 %9 to i32
%cmp.6 = icmp sgt i32 %i.addr.041, %conv.6
%conv7.6 = zext i1 %cmp.6 to i32
- store i32 %conv7.6, i32* %1, align 4, !tbaa !1
- %10 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.6, ptr %1, align 4, !tbaa !1
+ %10 = load i8, ptr @b, align 1, !tbaa !7
%conv.7 = zext i8 %10 to i32
%cmp.7 = icmp sgt i32 %i.addr.041, %conv.7
%conv7.7 = zext i1 %cmp.7 to i32
- store i32 %conv7.7, i32* %1, align 4, !tbaa !1
- %11 = load i8, i8* @b, align 1, !tbaa !7
+ store i32 %conv7.7, ptr %1, align 4, !tbaa !1
+ %11 = load i8, ptr @b, align 1, !tbaa !7
%conv.8 = zext i8 %11 to i32
%cmp.8 = icmp sgt i32 %i.addr.041, %conv.8
%conv7.8 = zext i1 %cmp.8 to i32
- store i32 %conv7.8, i32* %1, align 4, !tbaa !1
+ store i32 %conv7.8, ptr %1, align 4, !tbaa !1
%dec11 = add nsw i32 %j.035, -1
%tobool2 = icmp eq i32 %dec11, 0
br i1 %tobool2, label %for.cond13.preheader, label %for.cond4.preheader
for.cond.for.end27_crit_edge: ; preds = %for.cond13.preheader
%conv8.le.le = zext i1 %cmp.8 to i64
- store i64 %conv8.le.le, i64* @e, align 8, !tbaa !8
- store i64 10, i64* @g, align 8, !tbaa !8
+ store i64 %conv8.le.le, ptr @e, align 8, !tbaa !8
+ store i64 10, ptr @g, align 8, !tbaa !8
br label %for.end27
for.end27: ; preds = %for.cond.for.end27_crit_edge, %entry
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
; Tests for(i) { sum = 0; for(j) sum += B[j]; A[i] = sum; }
-define void @test1(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test1(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[E:%.*]], 0
; CHECK-NEXT: [[SUM_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[ADD]] = add i32 [[TMP2]], [[SUM]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_1]] = add i32 [[TMP3]], [[SUM_1]]
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_2]] = add i32 [[TMP4]], [[SUM_2]]
; CHECK-NEXT: [[INC_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_3]] = add i32 [[TMP5]], [[SUM_3]]
; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp eq i32 [[INC_3]], [[E]]
; CHECK-NEXT: [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_1]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_2]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_1]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_2]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_OUTER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end.loopexit.unr-lcssa.loopexit:
; CHECK: for.inner.epil:
; CHECK-NEXT: [[J_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[INC_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
; CHECK-NEXT: [[SUM_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL]] = add i32 [[TMP6]], [[SUM_EPIL]]
; CHECK-NEXT: [[INC_EPIL]] = add nuw i32 [[J_EPIL]], 1
; CHECK-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i32 [[INC_EPIL]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL]], label [[FOR_LATCH_EPIL:%.*]], label [[FOR_INNER_EPIL]]
; CHECK: for.latch.epil:
; CHECK-NEXT: [[ADD_LCSSA_EPIL:%.*]] = phi i32 [ [[ADD_EPIL]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_UNR]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], i32* [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], ptr [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD8_EPIL:%.*]] = add nuw i32 [[I_UNR]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 1, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label [[FOR_OUTER_EPIL_1:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA:%.*]]
; CHECK: for.inner.epil.1:
; CHECK-NEXT: [[J_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[INC_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
; CHECK-NEXT: [[SUM_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_1]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_1]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_1]] = add i32 [[TMP7]], [[SUM_EPIL_1]]
; CHECK-NEXT: [[INC_EPIL_1]] = add nuw i32 [[J_EPIL_1]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_1:%.*]] = icmp eq i32 [[INC_EPIL_1]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_1]], label [[FOR_LATCH_EPIL_1:%.*]], label [[FOR_INNER_EPIL_1]]
; CHECK: for.latch.epil.1:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_1:%.*]] = phi i32 [ [[ADD_EPIL_1]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_EPIL]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], i32* [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_EPIL]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], ptr [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD8_EPIL_1:%.*]] = add nuw i32 [[ADD8_EPIL]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i32 2, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1]], label [[FOR_OUTER_EPIL_2:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.inner.epil.2:
; CHECK-NEXT: [[J_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[INC_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
; CHECK-NEXT: [[SUM_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_2]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_2]] = add i32 [[TMP8]], [[SUM_EPIL_2]]
; CHECK-NEXT: [[INC_EPIL_2]] = add nuw i32 [[J_EPIL_2]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_2:%.*]] = icmp eq i32 [[INC_EPIL_2]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_2]], label [[FOR_LATCH_EPIL_2:%.*]], label [[FOR_INNER_EPIL_2]]
; CHECK: for.latch.epil.2:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_2:%.*]] = phi i32 [ [[ADD_EPIL_2]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_EPIL_1]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], i32* [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_EPIL_1]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], ptr [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.end.loopexit.epilog-lcssa:
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT]]
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !5
%add = add i32 %0, %sum
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %E
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4, !tbaa !5
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
; Tests for(i) { sum = A[i]; for(j) sum += B[j]; A[i] = sum; }
; A[i] load/store dependency should not block unroll-and-jam
-define void @test2(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test2(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[E:%.*]], 0
; CHECK: for.outer:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[ADD9_3:%.*]], [[FOR_LATCH:%.*]] ], [ 0, [[FOR_OUTER_PREHEADER_NEW]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_OUTER_PREHEADER_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_LATCH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9:%.*]] = add nuw nsw i32 [[I]], 1
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i32 [[NITER]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD9]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD9]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_1:%.*]] = add nuw nsw i32 [[ADD9]], 1
; CHECK-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i32 [[NITER_NEXT]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD9_1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD9_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_2:%.*]] = add nuw nsw i32 [[ADD9_1]], 1
; CHECK-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i32 [[NITER_NEXT_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD9_2]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD9_2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_3]] = add nuw i32 [[ADD9_2]], 1
; CHECK-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER_NEXT_2]], 1
; CHECK-NEXT: br label [[FOR_INNER:%.*]]
; CHECK-NEXT: [[SUM_2:%.*]] = phi i32 [ [[TMP4]], [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ [[TMP5]], [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD]] = add i32 [[TMP6]], [[SUM]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_1]] = add i32 [[TMP7]], [[SUM_1]]
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_2]] = add i32 [[TMP8]], [[SUM_2]]
; CHECK-NEXT: [[INC_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_3]] = add i32 [[TMP9]], [[SUM_3]]
; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp eq i32 [[INC_3]], [[E]]
; CHECK-NEXT: [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END10_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_OUTER]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end10.loopexit.unr-lcssa.loopexit:
; CHECK: for.outer.epil.preheader:
; CHECK-NEXT: br label [[FOR_OUTER_EPIL:%.*]]
; CHECK: for.outer.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_UNR]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL:%.*]]
; CHECK: for.inner.epil:
; CHECK-NEXT: [[J_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[INC_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
; CHECK-NEXT: [[SUM_EPIL:%.*]] = phi i32 [ [[TMP10]], [[FOR_OUTER_EPIL]] ], [ [[ADD_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL]] = add i32 [[TMP11]], [[SUM_EPIL]]
; CHECK-NEXT: [[INC_EPIL]] = add nuw i32 [[J_EPIL]], 1
; CHECK-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i32 [[INC_EPIL]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL]], label [[FOR_LATCH_EPIL:%.*]], label [[FOR_INNER_EPIL]]
; CHECK: for.latch.epil:
; CHECK-NEXT: [[ADD_LCSSA_EPIL:%.*]] = phi i32 [ [[ADD_EPIL]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL:%.*]] = add nuw i32 [[I_UNR]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 1, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label [[FOR_OUTER_EPIL_1:%.*]], label [[FOR_END10_LOOPEXIT_EPILOG_LCSSA:%.*]]
; CHECK: for.outer.epil.1:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD9_EPIL]]
-; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD9_EPIL]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_1:%.*]]
; CHECK: for.inner.epil.1:
; CHECK-NEXT: [[J_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[INC_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
; CHECK-NEXT: [[SUM_EPIL_1:%.*]] = phi i32 [ [[TMP12]], [[FOR_OUTER_EPIL_1]] ], [ [[ADD_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_1]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_1]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_1]] = add i32 [[TMP13]], [[SUM_EPIL_1]]
; CHECK-NEXT: [[INC_EPIL_1]] = add nuw i32 [[J_EPIL_1]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_1:%.*]] = icmp eq i32 [[INC_EPIL_1]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_1]], label [[FOR_LATCH_EPIL_1:%.*]], label [[FOR_INNER_EPIL_1]]
; CHECK: for.latch.epil.1:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_1:%.*]] = phi i32 [ [[ADD_EPIL_1]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL_1:%.*]] = add nuw i32 [[ADD9_EPIL]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i32 2, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1]], label [[FOR_OUTER_EPIL_2:%.*]], label [[FOR_END10_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.outer.epil.2:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD9_EPIL_1]]
-; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD9_EPIL_1]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_2:%.*]]
; CHECK: for.inner.epil.2:
; CHECK-NEXT: [[J_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[INC_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
; CHECK-NEXT: [[SUM_EPIL_2:%.*]] = phi i32 [ [[TMP14]], [[FOR_OUTER_EPIL_2]] ], [ [[ADD_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_2]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_2]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_2]] = add i32 [[TMP15]], [[SUM_EPIL_2]]
; CHECK-NEXT: [[INC_EPIL_2]] = add nuw i32 [[J_EPIL_2]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_2:%.*]] = icmp eq i32 [[INC_EPIL_2]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_2]], label [[FOR_LATCH_EPIL_2:%.*]], label [[FOR_INNER_EPIL_2]]
; CHECK: for.latch.epil.2:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_2:%.*]] = phi i32 [ [[ADD_EPIL_2]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END10_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.end10.loopexit.epilog-lcssa:
; CHECK-NEXT: br label [[FOR_END10_LOOPEXIT]]
for.outer:
%i = phi i32 [ %add9, %for.latch ], [ 0, %for.outer.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !5
br label %for.inner
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ %0, %for.outer ], [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %B, i32 %j
- %1 = load i32, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %B, i32 %j
+ %1 = load i32, ptr %arrayidx6, align 4, !tbaa !5
%add = add i32 %1, %sum
%inc = add nuw i32 %j, 1
%exitcond = icmp eq i32 %inc, %E
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- store i32 %add.lcssa, i32* %arrayidx, align 4, !tbaa !5
+ store i32 %add.lcssa, ptr %arrayidx, align 4, !tbaa !5
%add9 = add nuw i32 %i, 1
%exitcond28 = icmp eq i32 %add9, %I
br i1 %exitcond28, label %for.end10.loopexit, label %for.outer
; Tests Complete unroll-and-jam of the outer loop
-define void @test3(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test3(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[E:%.*]], 0
; CHECK-NEXT: [[SUM_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[SUB:%.*]] = add i32 [[SUM]], 10
; CHECK-NEXT: [[ADD]] = sub i32 [[SUB]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[SUB_1:%.*]] = add i32 [[SUM_1]], 10
; CHECK-NEXT: [[ADD_1]] = sub i32 [[SUB_1]], [[TMP1]]
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[SUB_2:%.*]] = add i32 [[SUM_2]], 10
; CHECK-NEXT: [[ADD_2]] = sub i32 [[SUB_2]], [[TMP2]]
; CHECK-NEXT: [[INC_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[SUB_3:%.*]] = add i32 [[SUM_3]], 10
; CHECK-NEXT: [[ADD_3]] = sub i32 [[SUB_3]], [[TMP3]]
; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[A:%.*]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 1
-; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 2
-; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 3
-; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[A:%.*]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 1
+; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 2
+; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 3
+; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !5
%sub = add i32 %sum, 10
%add = sub i32 %sub, %0
%inc = add nuw i32 %j, 1
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add, ptr %arrayidx6, align 4, !tbaa !5
%add8 = add nuw nsw i32 %i, 1
%exitcond23 = icmp eq i32 %add8, 4
br i1 %exitcond23, label %for.end, label %for.outer
; Tests Complete unroll-and-jam with a trip count of 1
-define void @test4(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test4(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[E:%.*]], 0
; CHECK: for.inner:
; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[SUB:%.*]] = add i32 [[SUM]], 10
; CHECK-NEXT: [[ADD]] = sub i32 [[SUB]], [[TMP0]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_LATCH:%.*]], label [[FOR_INNER]]
; CHECK: for.latch:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INNER]] ]
-; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[A:%.*]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[A:%.*]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !5
%sub = add i32 %sum, 10
%add = sub i32 %sub, %0
%inc = add nuw i32 %j, 1
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add, ptr %arrayidx6, align 4, !tbaa !5
%add8 = add nuw nsw i32 %i, 1
%exitcond23 = icmp eq i32 %add8, 1
br i1 %exitcond23, label %for.end, label %for.outer
; CHECK-NEXT: [[INC8_SINK15_1:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC8_1:%.*]], [[FOR_INC_1]] ]
; CHECK-NEXT: br label [[FOR_INNER2:%.*]]
; CHECK: for.inner2:
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0), align 4
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[L1]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[FOR_COND4:%.*]], label [[FOR_INC:%.*]]
; CHECK: for.cond4:
-; CHECK-NEXT: [[L0:%.*]] = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 1, i32 0), align 4
+; CHECK-NEXT: [[L0:%.*]] = load i32, ptr getelementptr inbounds ([1 x i32], ptr @a, i32 1, i32 0), align 4
; CHECK-NEXT: [[TOBOOL_1:%.*]] = icmp eq i32 [[L0]], 0
; CHECK-NEXT: br i1 [[TOBOOL_1]], label [[FOR_COND4A:%.*]], label [[FOR_INC]]
; CHECK: for.cond4a:
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC8]] = add nuw nsw i32 [[INC8_SINK15]], 1
-; CHECK-NEXT: [[L1_1:%.*]] = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0), align 4
+; CHECK-NEXT: [[L1_1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: [[TOBOOL_11:%.*]] = icmp eq i32 [[L1_1]], 0
; CHECK-NEXT: br i1 [[TOBOOL_11]], label [[FOR_COND4_1:%.*]], label [[FOR_INC_1]]
; CHECK: for.latch:
; CHECK-NEXT: [[DOTLCSSA_LCSSA:%.*]] = phi i32 [ [[DOTLCSSA_1]], [[FOR_LATCH:%.*]] ]
; CHECK-NEXT: ret i32 0
; CHECK: for.cond4.1:
-; CHECK-NEXT: [[L0_1:%.*]] = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 1, i32 0), align 4
+; CHECK-NEXT: [[L0_1:%.*]] = load i32, ptr getelementptr inbounds ([1 x i32], ptr @a, i32 1, i32 0), align 4
; CHECK-NEXT: [[TOBOOL_1_1:%.*]] = icmp eq i32 [[L0_1]], 0
; CHECK-NEXT: br i1 [[TOBOOL_1_1]], label [[FOR_COND4A_1:%.*]], label [[FOR_INC_1]]
; CHECK: for.cond4a.1:
br label %for.inner2
for.inner2:
- %l1 = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0), align 4
+ %l1 = load i32, ptr @a, align 4
%tobool = icmp eq i32 %l1, 0
br i1 %tobool, label %for.cond4, label %for.inc
for.cond4:
- %l0 = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 1, i32 0), align 4
+ %l0 = load i32, ptr getelementptr inbounds ([1 x i32], ptr @a, i32 1, i32 0), align 4
%tobool.1 = icmp eq i32 %l0, 0
br i1 %tobool.1, label %for.cond4a, label %for.inc
define i32 @test6() #0 {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[F_PROMOTED10:%.*]] = load i32, i32* @f, align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[F_PROMOTED10:%.*]] = load i32, ptr @f, align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br i1 false, label [[FOR_END_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]]
; CHECK: entry.new:
; CHECK-NEXT: br label [[FOR_OUTER:%.*]]
; CHECK-NEXT: ret i32 0
;
entry:
- %f.promoted10 = load i32, i32* @f, align 4, !tbaa !5
+ %f.promoted10 = load i32, ptr @f, align 4, !tbaa !5
br label %for.outer
for.outer:
; Has a positive dependency between two stores. Still valid.
; The negative dependency is in unroll-and-jam-disabled.ll
-define void @test7(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test7(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[E:%.*]], 0
; CHECK: for.outer:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[ADD_3:%.*]], [[FOR_LATCH:%.*]] ], [ 0, [[FOR_PREHEADER_NEW]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_PREHEADER_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_LATCH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[I]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i32 [[NITER]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[ADD]], 1
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_1]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i32 [[NITER_NEXT]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_1]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[ADD_1]], 1
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_2]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_2]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i32 [[NITER_NEXT_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_2]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_2]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_3]] = add nuw i32 [[ADD_2]], 1
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_3]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_3]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER_NEXT_2]], 1
; CHECK-NEXT: br label [[FOR_INNER:%.*]]
; CHECK: for.latch:
; CHECK-NEXT: [[ADD9_LCSSA_1:%.*]] = phi i32 [ [[ADD9_1:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD9_LCSSA_2:%.*]] = phi i32 [ [[ADD9_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD9_LCSSA_3:%.*]] = phi i32 [ [[ADD9_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA]], i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_1]], i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_1]], ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_OUTER]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.inner:
; CHECK-NEXT: [[J_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD10_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD9_3]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD10_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9]] = add i32 [[L1]], [[SUM]]
; CHECK-NEXT: [[ADD10]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[L1_1:%.*]] = load i32, i32* [[ARRAYIDX7_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[L1_1:%.*]] = load i32, ptr [[ARRAYIDX7_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_1]] = add i32 [[L1_1]], [[SUM_1]]
; CHECK-NEXT: [[ADD10_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX7_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[L1_2:%.*]] = load i32, i32* [[ARRAYIDX7_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[L1_2:%.*]] = load i32, ptr [[ARRAYIDX7_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_2]] = add i32 [[L1_2]], [[SUM_2]]
; CHECK-NEXT: [[ADD10_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX7_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[L1_3:%.*]] = load i32, i32* [[ARRAYIDX7_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[L1_3:%.*]] = load i32, ptr [[ARRAYIDX7_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_3]] = add i32 [[L1_3]], [[SUM_3]]
; CHECK-NEXT: [[ADD10_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp eq i32 [[ADD10_3]], [[E]]
; CHECK: for.outer.epil.preheader:
; CHECK-NEXT: br label [[FOR_OUTER_EPIL:%.*]]
; CHECK: for.outer.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_UNR]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nuw i32 [[I_UNR]], 1
-; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL:%.*]]
; CHECK: for.inner.epil:
; CHECK-NEXT: [[SUM_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD9_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
; CHECK-NEXT: [[J_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD10_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX7_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL]]
-; CHECK-NEXT: [[L1_EPIL:%.*]] = load i32, i32* [[ARRAYIDX7_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL]]
+; CHECK-NEXT: [[L1_EPIL:%.*]] = load i32, ptr [[ARRAYIDX7_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL]] = add i32 [[L1_EPIL]], [[SUM_EPIL]]
; CHECK-NEXT: [[ADD10_EPIL]] = add nuw i32 [[J_EPIL]], 1
; CHECK-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i32 [[ADD10_EPIL]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL]], label [[FOR_LATCH_EPIL:%.*]], label [[FOR_INNER_EPIL]]
; CHECK: for.latch.epil:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL:%.*]] = phi i32 [ [[ADD9_EPIL]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL]], i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL]], ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 1, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label [[FOR_OUTER_EPIL_1:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA:%.*]]
; CHECK: for.outer.epil.1:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_1:%.*]] = add nuw i32 [[ADD_EPIL]], 1
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_1]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_1]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_1:%.*]]
; CHECK: for.inner.epil.1:
; CHECK-NEXT: [[SUM_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD9_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
; CHECK-NEXT: [[J_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD10_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX7_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_1]]
-; CHECK-NEXT: [[L1_EPIL_1:%.*]] = load i32, i32* [[ARRAYIDX7_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_1]]
+; CHECK-NEXT: [[L1_EPIL_1:%.*]] = load i32, ptr [[ARRAYIDX7_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL_1]] = add i32 [[L1_EPIL_1]], [[SUM_EPIL_1]]
; CHECK-NEXT: [[ADD10_EPIL_1]] = add nuw i32 [[J_EPIL_1]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_1:%.*]] = icmp eq i32 [[ADD10_EPIL_1]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_1]], label [[FOR_LATCH_EPIL_1:%.*]], label [[FOR_INNER_EPIL_1]]
; CHECK: for.latch.epil.1:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL_1:%.*]] = phi i32 [ [[ADD9_EPIL_1]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_1]], i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_1]], ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i32 2, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1]], label [[FOR_OUTER_EPIL_2:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.outer.epil.2:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_1]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_1]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_2:%.*]] = add nuw i32 [[ADD_EPIL_1]], 1
-; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_2]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX2_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_2]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX2_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_2:%.*]]
; CHECK: for.inner.epil.2:
; CHECK-NEXT: [[SUM_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD9_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
; CHECK-NEXT: [[J_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD10_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX7_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_2]]
-; CHECK-NEXT: [[L1_EPIL_2:%.*]] = load i32, i32* [[ARRAYIDX7_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX7_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_2]]
+; CHECK-NEXT: [[L1_EPIL_2:%.*]] = load i32, ptr [[ARRAYIDX7_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL_2]] = add i32 [[L1_EPIL_2]], [[SUM_EPIL_2]]
; CHECK-NEXT: [[ADD10_EPIL_2]] = add nuw i32 [[J_EPIL_2]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_2:%.*]] = icmp eq i32 [[ADD10_EPIL_2]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_2]], label [[FOR_LATCH_EPIL_2:%.*]], label [[FOR_INNER_EPIL_2]]
; CHECK: for.latch.epil.2:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL_2:%.*]] = phi i32 [ [[ADD9_EPIL_2]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_2]], i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_2]], ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.end.loopexit.epilog-lcssa:
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT]]
for.outer:
%i = phi i32 [ %add, %for.latch ], [ 0, %for.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 0, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 0, ptr %arrayidx, align 4, !tbaa !5
%add = add nuw i32 %i, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 2, i32* %arrayidx2, align 4, !tbaa !5
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 2, ptr %arrayidx2, align 4, !tbaa !5
br label %for.inner
for.latch:
- store i32 %add9, i32* %arrayidx, align 4, !tbaa !5
+ store i32 %add9, ptr %arrayidx, align 4, !tbaa !5
%exitcond30 = icmp eq i32 %add, %I
br i1 %exitcond30, label %for.end, label %for.outer
for.inner:
%sum = phi i32 [ 0, %for.outer ], [ %add9, %for.inner ]
%j = phi i32 [ 0, %for.outer ], [ %add10, %for.inner ]
- %arrayidx7 = getelementptr inbounds i32, i32* %B, i32 %j
- %l1 = load i32, i32* %arrayidx7, align 4, !tbaa !5
+ %arrayidx7 = getelementptr inbounds i32, ptr %B, i32 %j
+ %l1 = load i32, ptr %arrayidx7, align 4, !tbaa !5
%add9 = add i32 %l1, %sum
%add10 = add nuw i32 %j, 1
%exitcond = icmp eq i32 %add10, %E
; Same as test7 with an extra outer loop nest
-define void @test8(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @test8(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[E:%.*]], 0
; CHECK: for.outer:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[ADD_3:%.*]], [[FOR_LATCH:%.*]] ], [ 0, [[FOR_OUTEST_NEW]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, [[FOR_OUTEST_NEW]] ], [ [[NITER_NEXT_3:%.*]], [[FOR_LATCH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[I]], 1
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT:%.*]] = add nuw nsw i32 [[NITER]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[ADD]], 1
-; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_1]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_1:%.*]] = add nuw nsw i32 [[NITER_NEXT]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_1]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[ADD_1]], 1
-; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_2]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_2]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_2:%.*]] = add nuw nsw i32 [[NITER_NEXT_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_2]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_2]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_3]] = add nuw i32 [[ADD_2]], 1
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_3]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_3]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER_NEXT_2]], 1
; CHECK-NEXT: br label [[FOR_INNER:%.*]]
; CHECK: for.inner:
; CHECK-NEXT: [[J_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD10_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD9_3:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD10_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[ARRAYIDX11]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9]] = add i32 [[L1]], [[SUM]]
; CHECK-NEXT: [[ADD10]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX11_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[L1_1:%.*]] = load i32, i32* [[ARRAYIDX11_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[L1_1:%.*]] = load i32, ptr [[ARRAYIDX11_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_1]] = add i32 [[L1_1]], [[SUM_1]]
; CHECK-NEXT: [[ADD10_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX11_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[L1_2:%.*]] = load i32, i32* [[ARRAYIDX11_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[L1_2:%.*]] = load i32, ptr [[ARRAYIDX11_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_2]] = add i32 [[L1_2]], [[SUM_2]]
; CHECK-NEXT: [[ADD10_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX11_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[L1_3:%.*]] = load i32, i32* [[ARRAYIDX11_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[L1_3:%.*]] = load i32, ptr [[ARRAYIDX11_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_3]] = add i32 [[L1_3]], [[SUM_3]]
; CHECK-NEXT: [[ADD10_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp eq i32 [[ADD10_3]], [[E]]
; CHECK-NEXT: [[ADD9_LCSSA_1:%.*]] = phi i32 [ [[ADD9_1]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD9_LCSSA_2:%.*]] = phi i32 [ [[ADD9_2]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD9_LCSSA_3:%.*]] = phi i32 [ [[ADD9_3]], [[FOR_INNER]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA]], i32* [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_1]], i32* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], i32* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], i32* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA]], ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_1]], ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_CLEANUP_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_OUTER]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.cleanup.unr-lcssa.loopexit:
; CHECK: for.outer.epil.preheader:
; CHECK-NEXT: br label [[FOR_OUTER_EPIL:%.*]]
; CHECK: for.outer.epil:
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_UNR]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nuw i32 [[I_UNR]], 1
-; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL:%.*]]
; CHECK: for.inner.epil:
; CHECK-NEXT: [[SUM_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD9_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
; CHECK-NEXT: [[J_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD10_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX11_EPIL:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL]]
-; CHECK-NEXT: [[L1_EPIL:%.*]] = load i32, i32* [[ARRAYIDX11_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL]]
+; CHECK-NEXT: [[L1_EPIL:%.*]] = load i32, ptr [[ARRAYIDX11_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL]] = add i32 [[L1_EPIL]], [[SUM_EPIL]]
; CHECK-NEXT: [[ADD10_EPIL]] = add nuw i32 [[J_EPIL]], 1
; CHECK-NEXT: [[EXITCOND_EPIL:%.*]] = icmp eq i32 [[ADD10_EPIL]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL]], label [[FOR_LATCH_EPIL:%.*]], label [[FOR_INNER_EPIL]]
; CHECK: for.latch.epil:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL:%.*]] = phi i32 [ [[ADD9_EPIL]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL]], i32* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL]], ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 1, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label [[FOR_OUTER_EPIL_1:%.*]], label [[FOR_CLEANUP_EPILOG_LCSSA:%.*]]
; CHECK: for.outer.epil.1:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_1:%.*]] = add nuw i32 [[ADD_EPIL]], 1
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_1]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_1]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_1:%.*]]
; CHECK: for.inner.epil.1:
; CHECK-NEXT: [[SUM_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD9_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
; CHECK-NEXT: [[J_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD10_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX11_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_1]]
-; CHECK-NEXT: [[L1_EPIL_1:%.*]] = load i32, i32* [[ARRAYIDX11_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_1]]
+; CHECK-NEXT: [[L1_EPIL_1:%.*]] = load i32, ptr [[ARRAYIDX11_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL_1]] = add i32 [[L1_EPIL_1]], [[SUM_EPIL_1]]
; CHECK-NEXT: [[ADD10_EPIL_1]] = add nuw i32 [[J_EPIL_1]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_1:%.*]] = icmp eq i32 [[ADD10_EPIL_1]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_1]], label [[FOR_LATCH_EPIL_1:%.*]], label [[FOR_INNER_EPIL_1]]
; CHECK: for.latch.epil.1:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL_1:%.*]] = phi i32 [ [[ADD9_EPIL_1]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_1]], i32* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_1]], ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i32 2, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1]], label [[FOR_OUTER_EPIL_2:%.*]], label [[FOR_CLEANUP_EPILOG_LCSSA]]
; CHECK: for.outer.epil.2:
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_1]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_1]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD_EPIL_2:%.*]] = add nuw i32 [[ADD_EPIL_1]], 1
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD_EPIL_2]]
-; CHECK-NEXT: store i32 2, i32* [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_EPIL_2]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_INNER_EPIL_2:%.*]]
; CHECK: for.inner.epil.2:
; CHECK-NEXT: [[SUM_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD9_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
; CHECK-NEXT: [[J_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD10_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX11_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_EPIL_2]]
-; CHECK-NEXT: [[L1_EPIL_2:%.*]] = load i32, i32* [[ARRAYIDX11_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX11_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_EPIL_2]]
+; CHECK-NEXT: [[L1_EPIL_2:%.*]] = load i32, ptr [[ARRAYIDX11_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD9_EPIL_2]] = add i32 [[L1_EPIL_2]], [[SUM_EPIL_2]]
; CHECK-NEXT: [[ADD10_EPIL_2]] = add nuw i32 [[J_EPIL_2]], 1
; CHECK-NEXT: [[EXITCOND_EPIL_2:%.*]] = icmp eq i32 [[ADD10_EPIL_2]], [[E]]
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_2]], label [[FOR_LATCH_EPIL_2:%.*]], label [[FOR_INNER_EPIL_2]]
; CHECK: for.latch.epil.2:
; CHECK-NEXT: [[ADD9_LCSSA_EPIL_2:%.*]] = phi i32 [ [[ADD9_EPIL_2]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_2]], i32* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: store i32 [[ADD9_LCSSA_EPIL_2]], ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_CLEANUP_EPILOG_LCSSA]]
; CHECK: for.cleanup.epilog-lcssa:
; CHECK-NEXT: br label [[FOR_CLEANUP]]
for.outer:
%i = phi i32 [ %add, %for.latch ], [ 0, %for.outest ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 0, i32* %arrayidx, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 0, ptr %arrayidx, align 4, !tbaa !5
%add = add nuw i32 %i, 1
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %add
- store i32 2, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %add
+ store i32 2, ptr %arrayidx6, align 4, !tbaa !5
br label %for.inner
for.inner:
%sum = phi i32 [ 0, %for.outer ], [ %add9, %for.inner ]
%j = phi i32 [ 0, %for.outer ], [ %add10, %for.inner ]
- %arrayidx11 = getelementptr inbounds i32, i32* %B, i32 %j
- %l1 = load i32, i32* %arrayidx11, align 4, !tbaa !5
+ %arrayidx11 = getelementptr inbounds i32, ptr %B, i32 %j
+ %l1 = load i32, ptr %arrayidx11, align 4, !tbaa !5
%add9 = add i32 %l1, %sum
%add10 = add nuw i32 %j, 1
%exitcond = icmp eq i32 %add10, %E
br i1 %exitcond, label %for.latch, label %for.inner
for.latch:
- store i32 %add9, i32* %arrayidx, align 4, !tbaa !5
+ store i32 %add9, ptr %arrayidx, align 4, !tbaa !5
%exitcond39 = icmp eq i32 %add, %I
br i1 %exitcond39, label %for.cleanup, label %for.outer
; Same as test1 with tbaa, not noalias
-define void @test9(i32 %I, i32 %E, i32* nocapture %A, i16* nocapture readonly %B) #0 {
+define void @test9(i32 %I, i32 %E, ptr nocapture %A, ptr nocapture readonly %B) #0 {
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[E:%.*]], 0
; CHECK-NEXT: [[SUM_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ]
; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* [[ARRAYIDX]], align 4, !tbaa [[TBAA10:![0-9]+]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA10:![0-9]+]]
; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT: [[ADD]] = add i32 [[SEXT]], [[SUM]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* [[ARRAYIDX_1]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_1:%.*]] = sext i16 [[TMP3]] to i32
; CHECK-NEXT: [[ADD_1]] = add i32 [[SEXT_1]], [[SUM_1]]
; CHECK-NEXT: [[INC_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_2]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX_2]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_2:%.*]] = sext i16 [[TMP4]] to i32
; CHECK-NEXT: [[ADD_2]] = add i32 [[SEXT_2]], [[SUM_2]]
; CHECK-NEXT: [[INC_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX_3]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_3:%.*]] = sext i16 [[TMP5]] to i32
; CHECK-NEXT: [[ADD_3]] = add i32 [[SEXT_3]], [[SUM_3]]
; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_3]], 1
; CHECK-NEXT: [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ]
; CHECK-NEXT: [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ]
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[I]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_1]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_2]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX6_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_1]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX6_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_2]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_OUTER]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end.loopexit.unr-lcssa.loopexit:
; CHECK: for.inner.epil:
; CHECK-NEXT: [[J_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[INC_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
; CHECK-NEXT: [[SUM_EPIL:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL]] ], [ [[ADD_EPIL:%.*]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_EPIL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i16, i16* [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_EPIL]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_EPIL:%.*]] = sext i16 [[TMP6]] to i32
; CHECK-NEXT: [[ADD_EPIL]] = add i32 [[SEXT_EPIL]], [[SUM_EPIL]]
; CHECK-NEXT: [[INC_EPIL]] = add nuw i32 [[J_EPIL]], 1
; CHECK-NEXT: br i1 [[EXITCOND_EPIL]], label [[FOR_LATCH_EPIL:%.*]], label [[FOR_INNER_EPIL]]
; CHECK: for.latch.epil:
; CHECK-NEXT: [[ADD_LCSSA_EPIL:%.*]] = phi i32 [ [[ADD_EPIL]], [[FOR_INNER_EPIL]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[I_UNR]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], i32* [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL]], ptr [[ARRAYIDX6_EPIL]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD8_EPIL:%.*]] = add nuw i32 [[I_UNR]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 1, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label [[FOR_OUTER_EPIL_1:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA:%.*]]
; CHECK: for.inner.epil.1:
; CHECK-NEXT: [[J_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[INC_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
; CHECK-NEXT: [[SUM_EPIL_1:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_1]] ], [ [[ADD_EPIL_1:%.*]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_EPIL_1]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_EPIL_1]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX_EPIL_1]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_EPIL_1:%.*]] = sext i16 [[TMP7]] to i32
; CHECK-NEXT: [[ADD_EPIL_1]] = add i32 [[SEXT_EPIL_1]], [[SUM_EPIL_1]]
; CHECK-NEXT: [[INC_EPIL_1]] = add nuw i32 [[J_EPIL_1]], 1
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_1]], label [[FOR_LATCH_EPIL_1:%.*]], label [[FOR_INNER_EPIL_1]]
; CHECK: for.latch.epil.1:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_1:%.*]] = phi i32 [ [[ADD_EPIL_1]], [[FOR_INNER_EPIL_1]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_EPIL]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], i32* [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_EPIL]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_1]], ptr [[ARRAYIDX6_EPIL_1]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[ADD8_EPIL_1:%.*]] = add nuw i32 [[ADD8_EPIL]], 1
; CHECK-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i32 2, [[XTRAITER]]
; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1]], label [[FOR_OUTER_EPIL_2:%.*]], label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.inner.epil.2:
; CHECK-NEXT: [[J_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[INC_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
; CHECK-NEXT: [[SUM_EPIL_2:%.*]] = phi i32 [ 0, [[FOR_OUTER_EPIL_2]] ], [ [[ADD_EPIL_2:%.*]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[J_EPIL_2]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[J_EPIL_2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX_EPIL_2]], align 4, !tbaa [[TBAA10]]
; CHECK-NEXT: [[SEXT_EPIL_2:%.*]] = sext i16 [[TMP8]] to i32
; CHECK-NEXT: [[ADD_EPIL_2]] = add i32 [[SEXT_EPIL_2]], [[SUM_EPIL_2]]
; CHECK-NEXT: [[INC_EPIL_2]] = add nuw i32 [[J_EPIL_2]], 1
; CHECK-NEXT: br i1 [[EXITCOND_EPIL_2]], label [[FOR_LATCH_EPIL_2:%.*]], label [[FOR_INNER_EPIL_2]]
; CHECK: for.latch.epil.2:
; CHECK-NEXT: [[ADD_LCSSA_EPIL_2:%.*]] = phi i32 [ [[ADD_EPIL_2]], [[FOR_INNER_EPIL_2]] ]
-; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[ADD8_EPIL_1]]
-; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], i32* [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX6_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_EPIL_1]]
+; CHECK-NEXT: store i32 [[ADD_LCSSA_EPIL_2]], ptr [[ARRAYIDX6_EPIL_2]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT_EPILOG_LCSSA]]
; CHECK: for.end.loopexit.epilog-lcssa:
; CHECK-NEXT: br label [[FOR_END_LOOPEXIT]]
for.inner:
%j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
%sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
- %arrayidx = getelementptr inbounds i16, i16* %B, i32 %j
- %0 = load i16, i16* %arrayidx, align 4, !tbaa !9
+ %arrayidx = getelementptr inbounds i16, ptr %B, i32 %j
+ %0 = load i16, ptr %arrayidx, align 4, !tbaa !9
%sext = sext i16 %0 to i32
%add = add i32 %sext, %sum
%inc = add nuw i32 %j, 1
for.latch:
%add.lcssa = phi i32 [ %add, %for.inner ]
- %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
- store i32 %add.lcssa, i32* %arrayidx6, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+ store i32 %add.lcssa, ptr %arrayidx6, align 4, !tbaa !5
%add8 = add nuw i32 %i, 1
%exitcond25 = icmp eq i32 %add8, %I
br i1 %exitcond25, label %for.end.loopexit, label %for.outer
define signext i16 @test10(i32 %k) #0 {
; CHECK-LABEL: @test10(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @c, i64 0, i64 0), align 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @c, align 1
; CHECK-NEXT: [[TOBOOL9:%.*]] = icmp eq i8 [[TMP0]], 0
; CHECK-NEXT: [[TOBOOL13:%.*]] = icmp ne i32 [[K:%.*]], 0
; CHECK-NEXT: br i1 false, label [[FOR_END26_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]]
; CHECK-NEXT: [[DEC_LCSSA_LCSSA:%.*]] = phi i64 [ [[DEC_LCSSA_LCSSA_PH]], [[FOR_END26_UNR_LCSSA]] ], [ 0, [[FOR_INC24_EPIL]] ]
; CHECK-NEXT: [[STOREMERGE_4_LCSSA_LCSSA:%.*]] = phi i64 [ [[STOREMERGE_4_LCSSA_LCSSA_PH]], [[FOR_END26_UNR_LCSSA]] ], [ [[STOREMERGE_4_LCSSA_EPIL]], [[FOR_INC24_EPIL]] ]
; CHECK-NEXT: [[STOREMERGE_5_LCSSA_LCSSA:%.*]] = phi i32 [ [[STOREMERGE_5_LCSSA_LCSSA_PH]], [[FOR_END26_UNR_LCSSA]] ], [ 0, [[FOR_INC24_EPIL]] ]
-; CHECK-NEXT: store i64 [[DEC_LCSSA_LCSSA]], i64* getelementptr inbounds ([[STRUCT_A:%.*]], %struct.a* @g, i64 0, i32 0), align 8
+; CHECK-NEXT: store i64 [[DEC_LCSSA_LCSSA]], ptr @g, align 8
; CHECK-NEXT: ret i16 0
; CHECK: for.body2.split2.1:
; CHECK-NEXT: br i1 [[TOBOOL13]], label [[FOR_INC21_1:%.*]], label [[FOR_INC21_IF_1:%.*]]
; CHECK-NEXT: br i1 [[TOBOOL_3]], label [[FOR_INC24]], label [[FOR_BODY2]]
;
entry:
- %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @c, i64 0, i64 0), align 1
+ %0 = load i8, ptr @c, align 1
%tobool9 = icmp eq i8 %0, 0
%tobool13 = icmp ne i32 %k, 0
br label %for.body
%dec.lcssa.lcssa = phi i64 [ 0, %for.inc24 ]
%storemerge.4.lcssa.lcssa = phi i64 [ %storemerge.4.lcssa, %for.inc24 ]
%storemerge.5.lcssa.lcssa = phi i32 [ %storemerge.5.lcssa, %for.inc24 ]
- store i64 %dec.lcssa.lcssa, i64* getelementptr inbounds (%struct.a, %struct.a* @g, i64 0, i32 0), align 8
+ store i64 %dec.lcssa.lcssa, ptr @g, align 8
ret i16 0
}
; CHECK-LABEL: @f(
; CHECK: call i32 @llvm.convergent(
; CHECK-NOT: call i32 @llvm.convergent(
-define void @f(i32* %a, i32* %b, i32* %c) #0 {
+define void @f(ptr %a, ptr %b, ptr %c) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%convergentB = call i32 @llvm.convergent(i32 %loadB)
%mulC = mul i32 %loadA, %convergentB
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+ store i32 %mulC, ptr %arrayidxC, align 4
%add = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %add, 20
br label %bb1
bb1:
- %tmp1 = load i16, i16* @c1
+ %tmp1 = load i16, ptr @c1
br label %bb2
bb2:
%tmp2 = phi i16 [ %tmp1, %bb1 ], [ %tmp3, %bb2 ]
- %tmp4 = getelementptr inbounds [1 x i32], [1 x i32]* undef, i32 0, i32 4
- store i32 1, i32* %tmp4
- %tmp5 = getelementptr inbounds [1 x i32], [1 x i32]* undef, i32 0, i32 9
- store i32 0, i32* %tmp5
+ %tmp4 = getelementptr inbounds [1 x i32], ptr undef, i32 0, i32 4
+ store i32 1, ptr %tmp4
+ %tmp5 = getelementptr inbounds [1 x i32], ptr undef, i32 0, i32 9
+ store i32 0, ptr %tmp5
%tmp3 = add i16 %tmp2, 1
- store i16 %tmp2, i16* @c1
+ store i16 %tmp2, ptr @c1
%tmp6 = icmp sle i16 %tmp3, 0
br i1 %tmp6, label %bb2, label %bb0
}
bb6: ; preds = %bb6.lr.ph, %bb6
%_tmp1423 = phi i64 [ undef, %bb6.lr.ph ], [ %_tmp142, %bb6 ]
- %_tmp123 = getelementptr [2 x [3 x [5 x i16]]], [2 x [3 x [5 x i16]]]* @x, i16 0, i64 undef
- %_tmp126 = getelementptr [3 x [5 x i16]], [3 x [5 x i16]]* %_tmp123, i16 0, i64 %_tmp1423
- %_tmp129 = getelementptr [5 x i16], [5 x i16]* %_tmp126, i16 0, i64 undef
- %_tmp130 = load i16, i16* %_tmp129
- store i16 undef, i16* getelementptr ([2 x [3 x [5 x i16]]], [2 x [3 x [5 x i16]]]* @x, i64 0, i64 undef, i64 undef, i64 undef)
+ %_tmp123 = getelementptr [2 x [3 x [5 x i16]]], ptr @x, i16 0, i64 undef
+ %_tmp126 = getelementptr [3 x [5 x i16]], ptr %_tmp123, i16 0, i64 %_tmp1423
+ %_tmp129 = getelementptr [5 x i16], ptr %_tmp126, i16 0, i64 undef
+ %_tmp130 = load i16, ptr %_tmp129
+ store i16 undef, ptr getelementptr ([2 x [3 x [5 x i16]]], ptr @x, i64 0, i64 undef, i64 undef, i64 undef)
%_tmp142 = add i64 %_tmp1423, 1
br i1 false, label %bb6, label %loop.exit
bb6: ; preds = %bb6.lr.ph, %bb6
%_tmp1423 = phi i64 [ undef, %bb6.lr.ph ], [ %_tmp142, %bb6 ]
- %_tmp123 = getelementptr [2 x [3 x [5 x i16]]], [2 x [3 x [5 x i16]]]* @x, i16 0, i64 undef
- %_tmp126 = getelementptr [3 x [5 x i16]], [3 x [5 x i16]]* %_tmp123, i16 0, i64 %_tmp1423
- %_tmp129 = getelementptr [5 x i16], [5 x i16]* %_tmp126, i16 0, i64 undef
- %_tmp130 = load i16, i16* %_tmp129
- store i16 undef, i16* getelementptr ([2 x [3 x [5 x i16]]], [2 x [3 x [5 x i16]]]* @x, i64 0, i64 undef, i64 undef, i64 undef)
+ %_tmp123 = getelementptr [2 x [3 x [5 x i16]]], ptr @x, i16 0, i64 undef
+ %_tmp126 = getelementptr [3 x [5 x i16]], ptr %_tmp123, i16 0, i64 %_tmp1423
+ %_tmp129 = getelementptr [5 x i16], ptr %_tmp126, i16 0, i64 undef
+ %_tmp130 = load i16, ptr %_tmp129
+ store i16 undef, ptr getelementptr ([2 x [3 x [5 x i16]]], ptr @x, i64 0, i64 undef, i64 undef, i64 undef)
%_tmp142 = add i64 %_tmp1423, 1
br i1 false, label %bb6, label %loop.exit
%Partials.215 = type { [2 x %Dual.213] }
; Function Attrs: sspreq
-define void @"julia_axpy!_65480"(%Dual.212*, %Dual.212* %other) {
+define void @"julia_axpy!_65480"(ptr, ptr %other) {
top:
br label %if24
-; CHECK-NOT: %bc = bitcast i64* %v2.sroa.0.0..sroa_cast
-; CHECK: %bound0 = icmp ult i8* %[[x:[a-z0-9]+]], %[[y:[a-z0-9]+]]
-; CHECK-NOT: %bound1 = icmp ult i8* %[[y]], %[[x]]
+; CHECK-NOT: %bc = bitcast ptr %v2.sroa.0.0..sroa_cast
+; CHECK: %bound0 = icmp ult ptr %[[x:[a-z0-9]+]], %[[y:[a-z0-9]+]]
+; CHECK-NOT: %bound1 = icmp ult ptr %[[y]], %[[x]]
if24: ; preds = %if24, %top
%"#temp#1.sroa.3.02" = phi i64 [ undef, %top ], [ %2, %if24 ]
%1 = add i64 %"#temp#1.sroa.0.01", 1
%2 = add i64 %"#temp#1.sroa.3.02", 1
; This pointer is loop invariant. LAA used to re-use it from memcheck, even though it didn't dominate.
- %v2.sroa.0.0..sroa_cast = bitcast %Dual.212* %0 to i64*
- %v2.sroa.0.0.copyload = load i64, i64* %v2.sroa.0.0..sroa_cast, align 1
+ %v2.sroa.0.0.copyload = load i64, ptr %0, align 1
%3 = add i64 %"#temp#1.sroa.0.01", -1
- %4 = getelementptr inbounds %Dual.212, %Dual.212* %other, i64 0, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
- %5 = bitcast double* %4 to i64*
- store i64 undef, i64* %5, align 8
+ %4 = getelementptr inbounds %Dual.212, ptr %other, i64 0, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
+ store i64 undef, ptr %4, align 8
%notlhs27 = icmp eq i64 %2, undef
%notrhs28 = icmp eq i64 %1, undef
- %6 = or i1 %notrhs28, %notlhs27
- br i1 %6, label %L41.L335_crit_edge, label %if24
+ %5 = or i1 %notrhs28, %notlhs27
+ br i1 %5, label %L41.L335_crit_edge, label %if24
L41.L335_crit_edge: ; preds = %if24
ret void
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-@B = common global i32* null, align 8
-@A = common global i32* null, align 8
-@C = common global i32* null, align 8
-@D = common global i32* null, align 8
-@E = common global i32* null, align 8
+@B = common global ptr null, align 8
+@A = common global ptr null, align 8
+@C = common global ptr null, align 8
+@D = common global ptr null, align 8
+@E = common global ptr null, align 8
define void @f() {
entry:
- %a = load i32*, i32** @A, align 8
- %b = load i32*, i32** @B, align 8
- %c = load i32*, i32** @C, align 8
- %d = load i32*, i32** @D, align 8
- %e = load i32*, i32** @E, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
+ %c = load ptr, ptr @C, align 8
+ %d = load ptr, ptr @D, align 8
+ %e = load ptr, ptr @E, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
; CHECK: %loadA.ldist1 = {{.*}} !noalias !25
; A noalias C: !25 -> { 17(15), 18(15), 19(15), 26(24) }
; ^^^^^^
- %loadA = load i32, i32* %arrayidxA, align 4
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
; CHECK: for.body:
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %ind
; CHECK: %loadD = {{.*}} !alias.scope !31
; D's scope: !31 -> { 18(15), 32(33) }
; ^^^^^^
- %loadD = load i32, i32* %arrayidxD, align 4
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %ind
; CHECK: %loadE = {{.*}} !alias.scope !34
; E's scope: !34 -> { 19(15), 35(33) }
; ^^^^^^
- %loadE = load i32, i32* %arrayidxE, align 4
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
; CHECK: store i32 %mulC, {{.*}} !alias.scope !36, !noalias !38
; C's scope: !36 -> { 17(15), 37(33) }
; ^^^^^^
; C noalias D and E: !38 -> { 21(15), 32(33), 35(33) }
; ^^^^^^ ^^^^^^
- store i32 %mulC, i32* %arrayidxC, align 4
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %a, i32* %b, i32* %c) {
+define void @f(ptr %a, ptr %b, ptr %c) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
; CHECK: %loadA = {{.*}} !alias.scope !0
; A's scope: !0 -> { 1(2) }
- %loadA = load i32, i32* %arrayidxA, align 4
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
; CHECK: %loadB = {{.*}} !alias.scope !3
; B's scope: !3 -> { 4(2) }
- %loadB = load i32, i32* %arrayidxB, align 4
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulC = mul i32 %loadA, %loadB
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
; CHECK: store {{.*}} !alias.scope !5, !noalias !7
; C noalias A and B: !7 -> { 1(2), 4(2) }
- store i32 %mulC, i32* %arrayidxC, align 4
+ store i32 %mulC, ptr %arrayidxC, align 4
%add = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %add, 20
; than ptrtoint.
%jl_value_t = type opaque
-%jl_array_t = type { i8 addrspace(13)*, i64, i16, i16, i32 }
+%jl_array_t = type { ptr addrspace(13), i64, i16, i16, i32 }
declare i64 @julia_steprange_last_4949()
-define void @"japi1_align!_9477"(%jl_value_t addrspace(10)** %arg) {
+define void @"japi1_align!_9477"(ptr %arg) {
; LV-LAVEL: L26.lver.check
; LV: [[OFMul:%[^ ]*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[Step:%[^ ]*]])
; LV-NEXT: [[OFMulResult:%[^ ]*]] = extractvalue { i64, i1 } [[OFMul]], 0
; LV-NEXT: [[OFMulOverflow:%[^ ]*]] = extractvalue { i64, i1 } [[OFMul]], 1
; LV: [[OFNegMulResult:%[^ ]*]] = sub i64 0, [[OFMulResult]]
-; LV-NEXT: [[NegGEP:%[^ ]*]] = getelementptr i8, i8 addrspace(13)* [[Base:%[^ ]*]], i64 [[OFNegMulResult]]
-; LV-NEXT: icmp ugt i8 addrspace(13)* [[NegGEP]], [[Base]]
+; LV-NEXT: [[NegGEP:%[^ ]*]] = getelementptr i8, ptr addrspace(13) [[Base:%[^ ]*]], i64 [[OFNegMulResult]]
+; LV-NEXT: icmp ugt ptr addrspace(13) [[NegGEP]], [[Base]]
; LV-NOT: inttoptr
; LV-NOT: ptrtoint
top:
- %tmp = load %jl_value_t addrspace(10)*, %jl_value_t addrspace(10)** %arg, align 8
- %tmp1 = load i32, i32* inttoptr (i64 12 to i32*), align 4
+ %tmp = load ptr addrspace(10), ptr %arg, align 8
+ %tmp1 = load i32, ptr inttoptr (i64 12 to ptr), align 4
%tmp2 = sub i32 0, %tmp1
%tmp3 = call i64 @julia_steprange_last_4949()
- %tmp4 = addrspacecast %jl_value_t addrspace(10)* %tmp to %jl_value_t addrspace(11)*
- %tmp5 = bitcast %jl_value_t addrspace(11)* %tmp4 to %jl_value_t addrspace(10)* addrspace(11)*
- %tmp6 = load %jl_value_t addrspace(10)*, %jl_value_t addrspace(10)* addrspace(11)* %tmp5, align 8
- %tmp7 = addrspacecast %jl_value_t addrspace(10)* %tmp6 to %jl_value_t addrspace(11)*
- %tmp8 = bitcast %jl_value_t addrspace(11)* %tmp7 to i32 addrspace(13)* addrspace(11)*
- %tmp9 = load i32 addrspace(13)*, i32 addrspace(13)* addrspace(11)* %tmp8, align 8
+ %tmp4 = addrspacecast ptr addrspace(10) %tmp to ptr addrspace(11)
+ %tmp6 = load ptr addrspace(10), ptr addrspace(11) %tmp4, align 8
+ %tmp7 = addrspacecast ptr addrspace(10) %tmp6 to ptr addrspace(11)
+ %tmp9 = load ptr addrspace(13), ptr addrspace(11) %tmp7, align 8
%tmp10 = sext i32 %tmp2 to i64
br label %L26
L26:
%value_phi3 = phi i64 [ 0, %top ], [ %tmp11, %L26 ]
%tmp11 = add i64 %value_phi3, -1
- %tmp12 = getelementptr inbounds i32, i32 addrspace(13)* %tmp9, i64 %tmp11
- %tmp13 = load i32, i32 addrspace(13)* %tmp12, align 4
+ %tmp12 = getelementptr inbounds i32, ptr addrspace(13) %tmp9, i64 %tmp11
+ %tmp13 = load i32, ptr addrspace(13) %tmp12, align 4
%tmp14 = add i64 %tmp11, %tmp10
- %tmp15 = getelementptr inbounds i32, i32 addrspace(13)* %tmp9, i64 %tmp14
- store i32 %tmp13, i32 addrspace(13)* %tmp15, align 4
+ %tmp15 = getelementptr inbounds i32, ptr addrspace(13) %tmp9, i64 %tmp14
+ store i32 %tmp13, ptr addrspace(13) %tmp15, align 4
%tmp16 = icmp eq i64 %value_phi3, %tmp3
br i1 %tmp16, label %L45, label %L26
i32 0, label %return
]
return: ; preds = %entry, %entry
- %result.0 = phi i32* [ null, %entry ], [ null, %entry ] ; <i32*> [#uses=0]
+ %result.0 = phi ptr [ null, %entry ], [ null, %entry ] ; <ptr> [#uses=0]
br label %UnifiedExitNode
UnifiedExitNode: ; preds = %return, %entry
ret void
entry:
%retval = alloca i32, align 4
%a.addr = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- %0 = load i32, i32* %a.addr, align 4
+ store i32 %a, ptr %a.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
switch i32 %0, label %sw.default [
i32 0, label %sw.bb
i32 1, label %sw.bb1
define i32 @foo(i32 %a) {
%1 = alloca i32, align 4
%2 = alloca i32, align 4
- store i32 %a, i32* %2, align 4
- %3 = load i32, i32* %2, align 4
+ store i32 %a, ptr %2, align 4
+ %3 = load i32, ptr %2, align 4
switch i32 %3, label %6 [
i32 0, label %4
i32 1, label %5
]
; <label>:4
- store i32 10, i32* %1
+ store i32 10, ptr %1
br label %7
; <label>:5
- store i32 3, i32* %1
+ store i32 3, ptr %1
br label %7
; <label>:6
unreachable
; <label>:7
- %8 = load i32, i32* %1
+ %8 = load i32, ptr %1
ret i32 %8
}
; RUN: opt %s -passes=lowerswitch -S | FileCheck %s
-define void @foo(i32 %x, i32* %p) {
+define void @foo(i32 %x, ptr %p) {
; Cases 2 and 4 are removed and become the new default case.
; It is now enough to use two icmps to lower the switch.
;
i32 5, label %bb1
]
bb0:
- store i32 0, i32* %p
+ store i32 0, ptr %p
br label %exit
bb1:
- store i32 1, i32* %p
+ store i32 1, ptr %p
br label %exit
popular:
- store i32 2, i32* %p
+ store i32 2, ptr %p
br label %exit
exit:
ret void
unreachable
}
-define void @unreachable_gap(i64 %x, i32* %p) {
+define void @unreachable_gap(i64 %x, ptr %p) {
; Cases 6 and INT64_MAX become the new default, but we still exploit the fact
; that 3-4 is unreachable, so four icmps is enough.
i64 9223372036854775807, label %bb4
]
bb0:
- store i32 0, i32* %p
+ store i32 0, ptr %p
br label %exit
bb1:
- store i32 1, i32* %p
+ store i32 1, ptr %p
br label %exit
bb2:
- store i32 2, i32* %p
+ store i32 2, ptr %p
br label %exit
bb3:
- store i32 3, i32* %p
+ store i32 3, ptr %p
br label %exit
bb4:
- store i32 4, i32* %p
+ store i32 4, ptr %p
br label %exit
exit:
ret void
-define void @nocases(i32 %x, i32* %p) {
+define void @nocases(i32 %x, ptr %p) {
; Don't fall over when there are no cases.
;
; CHECK-LABEL: @nocases
unreachable
}
-define void @nocasesleft(i32 %x, i32* %p) {
+define void @nocasesleft(i32 %x, ptr %p) {
; Cases 2 and 4 are removed and we are left with no cases.
;
; CHECK-LABEL: @nocasesleft
i32 4, label %popular
]
popular:
- store i32 2, i32* %p
+ store i32 2, ptr %p
br label %exit
exit:
ret void