; CHECK-LABEL: test1
; CHECK: add i32 %x, %x
entry:
- %x = load i32, i32* @y
- store atomic i32 %x, i32* @x unordered, align 4
- %y = load i32, i32* @y
+ %x = load i32, ptr @y
+ store atomic i32 %x, ptr @x unordered, align 4
+ %y = load i32, ptr @y
%z = add i32 %x, %y
ret i32 %z
}
; CHECK-LABEL: test3
; CHECK: add i32 %x, %x
entry:
- %x = load i32, i32* @y
- %y = load atomic i32, i32* @x unordered, align 4
- %z = load i32, i32* @y
+ %x = load i32, ptr @y
+ %y = load atomic i32, ptr @x unordered, align 4
+ %z = load i32, ptr @y
%a = add i32 %x, %z
%b = add i32 %y, %a
ret i32 %b
; CHECK-LABEL: test5
; CHECK: add i32 %x, %x
entry:
- %x = load atomic i32, i32* @x unordered, align 4
- %y = load i32, i32* @x
+ %x = load atomic i32, ptr @x unordered, align 4
+ %y = load i32, ptr @x
%z = add i32 %x, %y
ret i32 %z
}
; GVN unordered load to load (unordered load must not be removed)
define i32 @test6() nounwind uwtable ssp {
; CHECK-LABEL: test6
-; CHECK: load atomic i32, i32* @x unordered
+; CHECK: load atomic i32, ptr @x unordered
entry:
- %x = load i32, i32* @x
- %x2 = load atomic i32, i32* @x unordered, align 4
+ %x = load i32, ptr @x
+ %x2 = load atomic i32, ptr @x unordered, align 4
%x3 = add i32 %x, %x2
ret i32 %x3
}
; CHECK-LABEL: test7
; CHECK: add i32 %x, %y
entry:
- %x = load i32, i32* @y
- store atomic i32 %x, i32* @x release, align 4
- %w = load atomic i32, i32* @x acquire, align 4
- %y = load i32, i32* @y
+ %x = load i32, ptr @y
+ store atomic i32 %x, ptr @x release, align 4
+ %w = load atomic i32, ptr @x acquire, align 4
+ %y = load i32, ptr @y
%z = add i32 %x, %y
ret i32 %z
}
; CHECK-LABEL: test9
; CHECK: add i32 %x, %x
entry:
- %x = load i32, i32* @y
- store atomic i32 %x, i32* @x monotonic, align 4
- %y = load i32, i32* @y
+ %x = load i32, ptr @y
+ store atomic i32 %x, ptr @x monotonic, align 4
+ %y = load i32, ptr @y
%z = add i32 %x, %y
ret i32 %z
}
; CHECK-LABEL: test10
; CHECK: add i32 %x, %y
entry:
- %x = load atomic i32, i32* @y unordered, align 4
- %clobber = load atomic i32, i32* @x monotonic, align 4
- %y = load atomic i32, i32* @y monotonic, align 4
+ %x = load atomic i32, ptr @y unordered, align 4
+ %clobber = load atomic i32, ptr @x monotonic, align 4
+ %y = load atomic i32, ptr @y monotonic, align 4
%z = add i32 %x, %y
ret i32 %z
}
br i1 %flag, label %if.then, label %if.end
if.then:
- store i32 43, i32* @y, align 4
-; CHECK: store i32 43, i32* @y, align 4
+ store i32 43, ptr @y, align 4
+; CHECK: store i32 43, ptr @y, align 4
br label %if.end
if.end:
- load atomic i32, i32* @x acquire, align 4
- %load = load i32, i32* @y, align 4
-; CHECK: load atomic i32, i32* @x acquire, align 4
-; CHECK: load i32, i32* @y, align 4
+ load atomic i32, ptr @x acquire, align 4
+ %load = load i32, ptr @y, align 4
+; CHECK: load atomic i32, ptr @x acquire, align 4
+; CHECK: load i32, ptr @y, align 4
ret i32 %load
}
; CHECK-LABEL: @test12(
; Can't remove a load over an ordering barrier
-define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
- %load0 = load i32, i32* %P1
- %1 = load atomic i32, i32* %P2 seq_cst, align 4
- %load1 = load i32, i32* %P1
+define i32 @test12(i1 %B, ptr %P1, ptr %P2) {
+ %load0 = load i32, ptr %P1
+ %1 = load atomic i32, ptr %P2 seq_cst, align 4
+ %load1 = load i32, ptr %P1
%sel = select i1 %B, i32 %load0, i32 %load1
ret i32 %sel
- ; CHECK: load i32, i32* %P1
- ; CHECK: load i32, i32* %P1
+ ; CHECK: load i32, ptr %P1
+ ; CHECK: load i32, ptr %P1
}
; CHECK-LABEL: @test13(
; atomic to non-atomic forwarding is legal
-define i32 @test13(i32* %P1) {
- %a = load atomic i32, i32* %P1 seq_cst, align 4
- %b = load i32, i32* %P1
+define i32 @test13(ptr %P1) {
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
+ %b = load i32, ptr %P1
%res = sub i32 %a, %b
ret i32 %res
- ; CHECK: load atomic i32, i32* %P1
+ ; CHECK: load atomic i32, ptr %P1
; CHECK: ret i32 0
}
; CHECK-LABEL: @test13b(
-define i32 @test13b(i32* %P1) {
- store atomic i32 0, i32* %P1 unordered, align 4
- %b = load i32, i32* %P1
+define i32 @test13b(ptr %P1) {
+ store atomic i32 0, ptr %P1 unordered, align 4
+ %b = load i32, ptr %P1
ret i32 %b
; CHECK: ret i32 0
}
; CHECK-LABEL: @test14(
; atomic to unordered atomic forwarding is legal
-define i32 @test14(i32* %P1) {
- %a = load atomic i32, i32* %P1 seq_cst, align 4
- %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test14(ptr %P1) {
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
%res = sub i32 %a, %b
ret i32 %res
- ; CHECK: load atomic i32, i32* %P1 seq_cst
+ ; CHECK: load atomic i32, ptr %P1 seq_cst
; CHECK-NEXT: ret i32 0
}
; CHECK-LABEL: @test15(
; implementation restriction: can't forward to stronger than unordered
-define i32 @test15(i32* %P1, i32* %P2) {
- %a = load atomic i32, i32* %P1 seq_cst, align 4
- %b = load atomic i32, i32* %P1 seq_cst, align 4
+define i32 @test15(ptr %P1, ptr %P2) {
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
+ %b = load atomic i32, ptr %P1 seq_cst, align 4
%res = sub i32 %a, %b
ret i32 %res
- ; CHECK: load atomic i32, i32* %P1
- ; CHECK: load atomic i32, i32* %P1
+ ; CHECK: load atomic i32, ptr %P1
+ ; CHECK: load atomic i32, ptr %P1
}
; CHECK-LABEL: @test16(
; It would be legal to use the later value in place of the
; former in this particular example; we just don't do that
; right now.
-define i32 @test16(i32* %P1, i32* %P2) {
- %a = load i32, i32* %P1, align 4
- %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test16(ptr %P1, ptr %P2) {
+ %a = load i32, ptr %P1, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
%res = sub i32 %a, %b
ret i32 %res
- ; CHECK: load i32, i32* %P1
- ; CHECK: load atomic i32, i32* %P1
+ ; CHECK: load i32, ptr %P1
+ ; CHECK: load atomic i32, ptr %P1
}
; CHECK-LABEL: @test16b(
-define i32 @test16b(i32* %P1) {
- store i32 0, i32* %P1
- %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test16b(ptr %P1) {
+ store i32 0, ptr %P1
+ %b = load atomic i32, ptr %P1 unordered, align 4
ret i32 %b
- ; CHECK: load atomic i32, i32* %P1
+ ; CHECK: load atomic i32, ptr %P1
}
; Can't DSE across a seq_cst atomic store (it acts as a full fence)
-define void @fence_seq_cst_store(i32* %P1, i32* %P2) {
+define void @fence_seq_cst_store(ptr %P1, ptr %P2) {
; CHECK-LABEL: @fence_seq_cst_store(
; CHECK: store
; CHECK: store atomic
; CHECK: store
- store i32 0, i32* %P1, align 4
- store atomic i32 0, i32* %P2 seq_cst, align 4
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
+ store atomic i32 0, ptr %P2 seq_cst, align 4
+ store i32 0, ptr %P1, align 4
ret void
}
; Can't DSE across a full fence
-define void @fence_seq_cst(i32* %P1, i32* %P2) {
+define void @fence_seq_cst(ptr %P1, ptr %P2) {
; CHECK-LABEL: @fence_seq_cst(
; CHECK: store
; CHECK: fence seq_cst
; CHECK: store
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
fence seq_cst
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
ret void
}
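; Hedged baseline sketch (hypothetical function, not part of this diff):
; with no fence between them, the first store to %P1 is dead and DSE is
; free to delete it, which is exactly what the fences above prevent.
define void @no_fence_dse_sketch(ptr %P1) {
  store i32 0, ptr %P1, align 4
  store i32 1, ptr %P1, align 4
  ret void
}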
; Can't DSE across a full syncscope("singlethread") fence
-define void @fence_seq_cst_st(i32* %P1, i32* %P2) {
+define void @fence_seq_cst_st(ptr %P1, ptr %P2) {
; CHECK-LABEL: @fence_seq_cst_st(
; CHECK: store
; CHECK: fence syncscope("singlethread") seq_cst
; CHECK: store
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
fence syncscope("singlethread") seq_cst
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
ret void
}
; Can't DSE across inline asm with side effects (treated as a full fence)
-define void @fence_asm_sideeffect(i32* %P1, i32* %P2) {
+define void @fence_asm_sideeffect(ptr %P1, ptr %P2) {
; CHECK-LABEL: @fence_asm_sideeffect(
; CHECK: store
; CHECK: call void asm sideeffect
; CHECK: store
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
call void asm sideeffect "", ""()
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
ret void
}
; Can't DSE across inline asm with a memory clobber (treated as a full fence)
-define void @fence_asm_memory(i32* %P1, i32* %P2) {
+define void @fence_asm_memory(ptr %P1, ptr %P2) {
; CHECK-LABEL: @fence_asm_memory(
; CHECK: store
; CHECK: call void asm
; CHECK: store
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
call void asm "", "~{memory}"()
- store i32 0, i32* %P1, align 4
+ store i32 0, ptr %P1, align 4
ret void
}
; Can't remove a volatile load
-define i32 @volatile_load(i32* %P1, i32* %P2) {
- %a = load i32, i32* %P1, align 4
- %b = load volatile i32, i32* %P1, align 4
+define i32 @volatile_load(ptr %P1, ptr %P2) {
+ %a = load i32, ptr %P1, align 4
+ %b = load volatile i32, ptr %P1, align 4
%res = sub i32 %a, %b
ret i32 %res
; CHECK-LABEL: @volatile_load(
- ; CHECK: load i32, i32* %P1
- ; CHECK: load volatile i32, i32* %P1
+ ; CHECK: load i32, ptr %P1
+ ; CHECK: load volatile i32, ptr %P1
}
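; Hedged baseline sketch (hypothetical function): with the volatile
; qualifier dropped, GVN forwards %a to %b and the whole function folds
; to ret i32 0, as in @test13 above.
define i32 @plain_load_sketch(ptr %P1) {
  %a = load i32, ptr %P1, align 4
  %b = load i32, ptr %P1, align 4
  %res = sub i32 %a, %b
  ret i32 %res
}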
; Can't remove redundant volatile loads
-define i32 @redundant_volatile_load(i32* %P1, i32* %P2) {
- %a = load volatile i32, i32* %P1, align 4
- %b = load volatile i32, i32* %P1, align 4
+define i32 @redundant_volatile_load(ptr %P1, ptr %P2) {
+ %a = load volatile i32, ptr %P1, align 4
+ %b = load volatile i32, ptr %P1, align 4
%res = sub i32 %a, %b
ret i32 %res
; CHECK-LABEL: @redundant_volatile_load(
- ; CHECK: load volatile i32, i32* %P1
- ; CHECK: load volatile i32, i32* %P1
+ ; CHECK: load volatile i32, ptr %P1
+ ; CHECK: load volatile i32, ptr %P1
; CHECK: sub
}
; Can't DSE a volatile store
-define void @volatile_store(i32* %P1, i32* %P2) {
+define void @volatile_store(ptr %P1, ptr %P2) {
; CHECK-LABEL: @volatile_store(
; CHECK: store volatile
; CHECK: store
- store volatile i32 0, i32* %P1, align 4
- store i32 3, i32* %P1, align 4
+ store volatile i32 0, ptr %P1, align 4
+ store i32 3, ptr %P1, align 4
ret void
}
; Can't DSE a redundant volatile store
-define void @redundant_volatile_store(i32* %P1, i32* %P2) {
+define void @redundant_volatile_store(ptr %P1, ptr %P2) {
; CHECK-LABEL: @redundant_volatile_store(
; CHECK: store volatile
; CHECK: store volatile
- store volatile i32 0, i32* %P1, align 4
- store volatile i32 0, i32* %P1, align 4
+ store volatile i32 0, ptr %P1, align 4
+ store volatile i32 0, ptr %P1, align 4
ret void
}
; Can value forward from volatiles
-define i32 @test20(i32* %P1, i32* %P2) {
- %a = load volatile i32, i32* %P1, align 4
- %b = load i32, i32* %P1, align 4
+define i32 @test20(ptr %P1, ptr %P2) {
+ %a = load volatile i32, ptr %P1, align 4
+ %b = load i32, ptr %P1, align 4
%res = sub i32 %a, %b
ret i32 %res
; CHECK-LABEL: @test20(
- ; CHECK: load volatile i32, i32* %P1
+ ; CHECK: load volatile i32, ptr %P1
; CHECK: ret i32 0
}
; We're currently conservative about widening
-define i64 @widen1(i32* %P1) {
+define i64 @widen1(ptr %P1) {
; CHECK-LABEL: @widen1(
- ; CHECK: load atomic i32, i32* %P1
- ; CHECK: load atomic i64, i64* %p2
- %p2 = bitcast i32* %P1 to i64*
- %a = load atomic i32, i32* %P1 unordered, align 4
- %b = load atomic i64, i64* %p2 unordered, align 4
+ ; CHECK: load atomic i32, ptr %P1
+ ; CHECK: load atomic i64, ptr %P1
+ %a = load atomic i32, ptr %P1 unordered, align 4
+ %b = load atomic i64, ptr %P1 unordered, align 4
%a64 = sext i32 %a to i64
%res = sub i64 %a64, %b
ret i64 %res
}
; narrowing does work
-define i64 @narrow(i32* %P1) {
+define i64 @narrow(ptr %P1) {
; CHECK-LABEL: @narrow(
- ; CHECK: load atomic i64, i64* %p2
- ; CHECK-NOT: load atomic i32, i32* %P1
- %p2 = bitcast i32* %P1 to i64*
- %a64 = load atomic i64, i64* %p2 unordered, align 4
- %b = load atomic i32, i32* %P1 unordered, align 4
+ ; CHECK: load atomic i64, ptr %P1
+ ; CHECK-NOT: load atomic i32, ptr %P1
+ %a64 = load atomic i64, ptr %P1 unordered, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
%b64 = sext i32 %b to i64
%res = sub i64 %a64, %b64
ret i64 %res
}
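; Hedged sketch of the plausible post-GVN shape of @narrow (assuming a
; little-endian target): the narrow atomic load is rewritten as a trunc
; of the wider unordered load before the sext.
define i64 @narrow_sketch(ptr %P1) {
  %a64 = load atomic i64, ptr %P1 unordered, align 4
  %lo = trunc i64 %a64 to i32
  %b64 = sext i32 %lo to i64
  %res = sub i64 %a64, %b64
  ret i64 %res
}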
; Missed optimization: we don't yet optimize ordered loads
-define i64 @narrow2(i32* %P1) {
+define i64 @narrow2(ptr %P1) {
; CHECK-LABEL: @narrow2(
- ; CHECK: load atomic i64, i64* %p2
- ; CHECK: load atomic i32, i32* %P1
- %p2 = bitcast i32* %P1 to i64*
- %a64 = load atomic i64, i64* %p2 acquire, align 4
- %b = load atomic i32, i32* %P1 acquire, align 4
+ ; CHECK: load atomic i64, ptr %P1
+ ; CHECK: load atomic i32, ptr %P1
+ %a64 = load atomic i64, ptr %P1 acquire, align 4
+ %b = load atomic i32, ptr %P1 acquire, align 4
%b64 = sext i32 %b to i64
%res = sub i64 %a64, %b64
ret i64 %res
; These tests are here only to show that we haven't obviously broken anything.
; unordered atomic to unordered atomic
-define i32 @non_local_fre(i32* %P1) {
+define i32 @non_local_fre(ptr %P1) {
; CHECK-LABEL: @non_local_fre(
-; CHECK: load atomic i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1
; CHECK: ret i32 0
; CHECK: ret i32 0
- %a = load atomic i32, i32* %P1 unordered, align 4
+ %a = load atomic i32, ptr %P1 unordered, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
ret i32 %a
next:
- %b = load atomic i32, i32* %P1 unordered, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
%res = sub i32 %a, %b
ret i32 %res
}
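; Hedged sketch of the form the CHECK lines above describe for
; @non_local_fre: on the %early path %a is known to be 0, and in %next
; %b is forwarded from %a, so both returns fold to 0.
define i32 @non_local_fre_sketch(ptr %P1) {
  %a = load atomic i32, ptr %P1 unordered, align 4
  %cmp = icmp eq i32 %a, 0
  br i1 %cmp, label %early, label %next
early:
  ret i32 0
next:
  ret i32 0
}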
; unordered atomic to non-atomic
-define i32 @non_local_fre2(i32* %P1) {
+define i32 @non_local_fre2(ptr %P1) {
; CHECK-LABEL: @non_local_fre2(
-; CHECK: load atomic i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1
; CHECK: ret i32 0
; CHECK: ret i32 0
- %a = load atomic i32, i32* %P1 unordered, align 4
+ %a = load atomic i32, ptr %P1 unordered, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
ret i32 %a
next:
- %b = load i32, i32* %P1
+ %b = load i32, ptr %P1
%res = sub i32 %a, %b
ret i32 %res
}
; Can't forward ordered atomics.
-define i32 @non_local_fre3(i32* %P1) {
+define i32 @non_local_fre3(ptr %P1) {
; CHECK-LABEL: @non_local_fre3(
-; CHECK: load atomic i32, i32* %P1 acquire
+; CHECK: load atomic i32, ptr %P1 acquire
; CHECK: ret i32 0
-; CHECK: load atomic i32, i32* %P1 acquire
+; CHECK: load atomic i32, ptr %P1 acquire
; CHECK: ret i32 %res
- %a = load atomic i32, i32* %P1 acquire, align 4
+ %a = load atomic i32, ptr %P1 acquire, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
ret i32 %a
next:
- %b = load atomic i32, i32* %P1 acquire, align 4
+ %b = load atomic i32, ptr %P1 acquire, align 4
%res = sub i32 %a, %b
ret i32 %res
}
declare void @clobber()
; unordered atomic to unordered atomic
-define i32 @non_local_pre(i32* %P1) {
+define i32 @non_local_pre(ptr %P1) {
; CHECK-LABEL: @non_local_pre(
-; CHECK: load atomic i32, i32* %P1 unordered
-; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load atomic i32, ptr %P1 unordered
+; CHECK: load atomic i32, ptr %P1 unordered
; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
; CHECK: ret i32 %b
- %a = load atomic i32, i32* %P1 unordered, align 4
+ %a = load atomic i32, ptr %P1 unordered, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load atomic i32, i32* %P1 unordered, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
ret i32 %b
}
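; Hedged reconstruction of the post-PRE shape the CHECK lines above
; describe: the load is hoisted into %early as %b.pre, and %next merges
; it with %a through a phi over the two predecessors.
define i32 @non_local_pre_sketch(ptr %P1) {
  %a = load atomic i32, ptr %P1 unordered, align 4
  %cmp = icmp eq i32 %a, 0
  br i1 %cmp, label %early, label %next
early:
  call void @clobber()
  %b.pre = load atomic i32, ptr %P1 unordered, align 4
  br label %next
next:
  %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
  ret i32 %b
}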
; unordered atomic to non-atomic
-define i32 @non_local_pre2(i32* %P1) {
+define i32 @non_local_pre2(ptr %P1) {
; CHECK-LABEL: @non_local_pre2(
-; CHECK: load atomic i32, i32* %P1 unordered
-; CHECK: load i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1 unordered
+; CHECK: load i32, ptr %P1
; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
; CHECK: ret i32 %b
- %a = load atomic i32, i32* %P1 unordered, align 4
+ %a = load atomic i32, ptr %P1 unordered, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load i32, i32* %P1
+ %b = load i32, ptr %P1
ret i32 %b
}
; non-atomic to unordered atomic - can't forward!
-define i32 @non_local_pre3(i32* %P1) {
+define i32 @non_local_pre3(ptr %P1) {
; CHECK-LABEL: @non_local_pre3(
-; CHECK: %a = load i32, i32* %P1
-; CHECK: %b = load atomic i32, i32* %P1 unordered
+; CHECK: %a = load i32, ptr %P1
+; CHECK: %b = load atomic i32, ptr %P1 unordered
; CHECK: ret i32 %b
- %a = load i32, i32* %P1
+ %a = load i32, ptr %P1
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load atomic i32, i32* %P1 unordered, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
ret i32 %b
}
; ordered atomic to ordered atomic - can't forward
-define i32 @non_local_pre4(i32* %P1) {
+define i32 @non_local_pre4(ptr %P1) {
; CHECK-LABEL: @non_local_pre4(
-; CHECK: %a = load atomic i32, i32* %P1 seq_cst
-; CHECK: %b = load atomic i32, i32* %P1 seq_cst
+; CHECK: %a = load atomic i32, ptr %P1 seq_cst
+; CHECK: %b = load atomic i32, ptr %P1 seq_cst
; CHECK: ret i32 %b
- %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load atomic i32, i32* %P1 seq_cst, align 4
+ %b = load atomic i32, ptr %P1 seq_cst, align 4
ret i32 %b
}
; can't remove volatile on any path
-define i32 @non_local_pre5(i32* %P1) {
+define i32 @non_local_pre5(ptr %P1) {
; CHECK-LABEL: @non_local_pre5(
-; CHECK: %a = load atomic i32, i32* %P1 seq_cst
-; CHECK: %b = load volatile i32, i32* %P1
+; CHECK: %a = load atomic i32, ptr %P1 seq_cst
+; CHECK: %b = load volatile i32, ptr %P1
; CHECK: ret i32 %b
- %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load volatile i32, i32* %P1
+ %b = load volatile i32, ptr %P1
ret i32 %b
}
; ordered atomic to unordered atomic
-define i32 @non_local_pre6(i32* %P1) {
+define i32 @non_local_pre6(ptr %P1) {
; CHECK-LABEL: @non_local_pre6(
-; CHECK: load atomic i32, i32* %P1 seq_cst
-; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load atomic i32, ptr %P1 seq_cst
+; CHECK: load atomic i32, ptr %P1 unordered
; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
; CHECK: ret i32 %b
- %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %a = load atomic i32, ptr %P1 seq_cst, align 4
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %early, label %next
early:
call void @clobber()
br label %next
next:
- %b = load atomic i32, i32* %P1 unordered, align 4
+ %b = load atomic i32, ptr %P1 unordered, align 4
ret i32 %b
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O3 -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-declare noalias i8* @_Znam(i64) #1
+declare noalias ptr @_Znam(i64) #1
define i32 @TestNoAsan() {
; CHECK-LABEL: @TestNoAsan(
; CHECK-NEXT: ret i32 0
;
bb:
- %i = tail call noalias i8* @_Znam(i64 2)
- %i1 = getelementptr inbounds i8, i8* %i, i64 1
- store i8 0, i8* %i1, align 1
- store i8 0, i8* %i, align 1
- %i2 = bitcast i8* %i to i16*
- %i3 = load i16, i16* %i2, align 4
+ %i = tail call noalias ptr @_Znam(i64 2)
+ %i1 = getelementptr inbounds i8, ptr %i, i64 1
+ store i8 0, ptr %i1, align 1
+ store i8 0, ptr %i, align 1
+ %i3 = load i16, ptr %i, align 4
%i4 = icmp eq i16 %i3, 0
br i1 %i4, label %bb10, label %bb5
bb5: ; preds = %bb
- %i6 = getelementptr inbounds i8, i8* %i, i64 2
- %i7 = bitcast i8* %i6 to i16*
- %i8 = load i16, i16* %i7, align 2
+ %i6 = getelementptr inbounds i8, ptr %i, i64 2
+ %i8 = load i16, ptr %i6, align 2
%i9 = sext i16 %i8 to i32
br label %bb10
define i32 @TestAsan() sanitize_address {
; CHECK-LABEL: @TestAsan(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = tail call noalias dereferenceable_or_null(2) i8* @_Znam(i64 2)
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 1
-; CHECK-NEXT: store i8 0, i8* [[I1]], align 1
-; CHECK-NEXT: store i8 0, i8* [[I]], align 1
-; CHECK-NEXT: [[I2:%.*]] = bitcast i8* [[I]] to i16*
-; CHECK-NEXT: [[I3:%.*]] = load i16, i16* [[I2]], align 4
+; CHECK-NEXT: [[I:%.*]] = tail call noalias dereferenceable_or_null(2) ptr @_Znam(i64 2)
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 1
+; CHECK-NEXT: store i8 0, ptr [[I1]], align 1
+; CHECK-NEXT: store i8 0, ptr [[I]], align 1
+; CHECK-NEXT: [[I3:%.*]] = load i16, ptr [[I]], align 4
; CHECK-NEXT: [[I4:%.*]] = icmp eq i16 [[I3]], 0
; CHECK-NEXT: br i1 [[I4]], label [[BB10:%.*]], label [[BB5:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 2
-; CHECK-NEXT: [[I7:%.*]] = bitcast i8* [[I6]] to i16*
-; CHECK-NEXT: [[I8:%.*]] = load i16, i16* [[I7]], align 2
+; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 2
+; CHECK-NEXT: [[I8:%.*]] = load i16, ptr [[I6]], align 2
; CHECK-NEXT: [[I9:%.*]] = sext i16 [[I8]] to i32
; CHECK-NEXT: br label [[BB10]]
; CHECK: bb10:
; CHECK-NEXT: ret i32 [[I11]]
;
bb:
- %i = tail call noalias i8* @_Znam(i64 2)
- %i1 = getelementptr inbounds i8, i8* %i, i64 1
- store i8 0, i8* %i1, align 1
- store i8 0, i8* %i, align 1
- %i2 = bitcast i8* %i to i16*
- %i3 = load i16, i16* %i2, align 4
+ %i = tail call noalias ptr @_Znam(i64 2)
+ %i1 = getelementptr inbounds i8, ptr %i, i64 1
+ store i8 0, ptr %i1, align 1
+ store i8 0, ptr %i, align 1
+ %i3 = load i16, ptr %i, align 4
%i4 = icmp eq i16 %i3, 0
br i1 %i4, label %bb10, label %bb5
bb5: ; preds = %bb
- %i6 = getelementptr inbounds i8, i8* %i, i64 2
- %i7 = bitcast i8* %i6 to i16*
- %i8 = load i16, i16* %i7, align 2
+ %i6 = getelementptr inbounds i8, ptr %i, i64 2
+ %i8 = load i16, ptr %i6, align 2
%i9 = sext i16 %i8 to i32
br label %bb10
define i32 @TestHWAsan() sanitize_hwaddress {
; CHECK-LABEL: @TestHWAsan(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = tail call noalias dereferenceable_or_null(2) i8* @_Znam(i64 2)
-; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 1
-; CHECK-NEXT: store i8 0, i8* [[I1]], align 1
-; CHECK-NEXT: store i8 0, i8* [[I]], align 1
-; CHECK-NEXT: [[I2:%.*]] = bitcast i8* [[I]] to i16*
-; CHECK-NEXT: [[I3:%.*]] = load i16, i16* [[I2]], align 4
+; CHECK-NEXT: [[I:%.*]] = tail call noalias dereferenceable_or_null(2) ptr @_Znam(i64 2)
+; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 1
+; CHECK-NEXT: store i8 0, ptr [[I1]], align 1
+; CHECK-NEXT: store i8 0, ptr [[I]], align 1
+; CHECK-NEXT: [[I3:%.*]] = load i16, ptr [[I]], align 4
; CHECK-NEXT: [[I4:%.*]] = icmp eq i16 [[I3]], 0
; CHECK-NEXT: br i1 [[I4]], label [[BB10:%.*]], label [[BB5:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 2
-; CHECK-NEXT: [[I7:%.*]] = bitcast i8* [[I6]] to i16*
-; CHECK-NEXT: [[I8:%.*]] = load i16, i16* [[I7]], align 2
+; CHECK-NEXT: [[I6:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 2
+; CHECK-NEXT: [[I8:%.*]] = load i16, ptr [[I6]], align 2
; CHECK-NEXT: [[I9:%.*]] = sext i16 [[I8]] to i32
; CHECK-NEXT: br label [[BB10]]
; CHECK: bb10:
; CHECK-NEXT: ret i32 [[I11]]
;
bb:
- %i = tail call noalias i8* @_Znam(i64 2)
- %i1 = getelementptr inbounds i8, i8* %i, i64 1
- store i8 0, i8* %i1, align 1
- store i8 0, i8* %i, align 1
- %i2 = bitcast i8* %i to i16*
- %i3 = load i16, i16* %i2, align 4
+ %i = tail call noalias ptr @_Znam(i64 2)
+ %i1 = getelementptr inbounds i8, ptr %i, i64 1
+ store i8 0, ptr %i1, align 1
+ store i8 0, ptr %i, align 1
+ %i3 = load i16, ptr %i, align 4
%i4 = icmp eq i16 %i3, 0
br i1 %i4, label %bb10, label %bb5
bb5: ; preds = %bb
- %i6 = getelementptr inbounds i8, i8* %i, i64 2
- %i7 = bitcast i8* %i6 to i16*
- %i8 = load i16, i16* %i7, align 2
+ %i6 = getelementptr inbounds i8, ptr %i, i64 2
+ %i8 = load i16, ptr %i6, align 2
%i9 = sext i16 %i8 to i32
br label %bb10